-rw-r--r--arch/i386/kernel/cpu/mcheck/non-fatal.c6
-rw-r--r--arch/i386/kernel/smpboot.c11
-rw-r--r--arch/i386/kernel/tsc.c4
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c6
-rw-r--r--arch/x86_64/kernel/mce.c6
-rw-r--r--arch/x86_64/kernel/smpboot.c12
-rw-r--r--arch/x86_64/kernel/time.c4
-rw-r--r--block/as-iosched.c7
-rw-r--r--block/cfq-iosched.c8
-rw-r--r--block/ll_rw_blk.c8
-rw-r--r--crypto/cryptomgr.c7
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/ata/libata-core.c25
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/ata/libata-scsi.c14
-rw-r--r--drivers/ata/libata.h4
-rw-r--r--drivers/atm/idt77252.c9
-rw-r--r--drivers/block/aoe/aoe.h2
-rw-r--r--drivers/block/aoe/aoecmd.c4
-rw-r--r--drivers/block/aoe/aoedev.c2
-rw-r--r--drivers/block/floppy.c10
-rw-r--r--drivers/block/paride/pd.c8
-rw-r--r--drivers/block/paride/pseudo.h10
-rw-r--r--drivers/block/sx8.c7
-rw-r--r--drivers/block/ub.c8
-rw-r--r--drivers/bluetooth/bcm203x.c7
-rw-r--r--drivers/char/cyclades.c9
-rw-r--r--drivers/char/drm/via_dmablit.c6
-rw-r--r--drivers/char/epca.c8
-rw-r--r--drivers/char/esp.c14
-rw-r--r--drivers/char/genrtc.c4
-rw-r--r--drivers/char/hvsi.c16
-rw-r--r--drivers/char/ip2/i2lib.c12
-rw-r--r--drivers/char/ip2/ip2main.c23
-rw-r--r--drivers/char/isicom.c12
-rw-r--r--drivers/char/moxa.c8
-rw-r--r--drivers/char/mxser.c9
-rw-r--r--drivers/char/pcmcia/synclink_cs.c8
-rw-r--r--drivers/char/random.c6
-rw-r--r--drivers/char/sonypi.c4
-rw-r--r--drivers/char/specialix.c14
-rw-r--r--drivers/char/synclink.c9
-rw-r--r--drivers/char/synclink_gt.c10
-rw-r--r--drivers/char/synclinkmp.c8
-rw-r--r--drivers/char/sysrq.c4
-rw-r--r--drivers/char/tpm/tpm.c6
-rw-r--r--drivers/char/tty_io.c31
-rw-r--r--drivers/char/vt.c6
-rw-r--r--drivers/connector/cn_queue.c8
-rw-r--r--drivers/connector/connector.c31
-rw-r--r--drivers/cpufreq/cpufreq.c10
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c7
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c28
-rw-r--r--drivers/i2c/chips/ds1374.c12
-rw-r--r--drivers/ieee1394/hosts.c9
-rw-r--r--drivers/ieee1394/hosts.h2
-rw-r--r--drivers/ieee1394/sbp2.c28
-rw-r--r--drivers/ieee1394/sbp2.h2
-rw-r--r--drivers/infiniband/core/addr.c6
-rw-r--r--drivers/infiniband/core/cache.c7
-rw-r--r--drivers/infiniband/core/cm.c19
-rw-r--r--drivers/infiniband/core/cma.c10
-rw-r--r--drivers/infiniband/core/iwcm.c6
-rw-r--r--drivers/infiniband/core/mad.c25
-rw-r--r--drivers/infiniband/core/mad_priv.h2
-rw-r--r--drivers/infiniband/core/mad_rmpp.c18
-rw-r--r--drivers/infiniband/core/sa_query.c10
-rw-r--r--drivers/infiniband/core/uverbs_mem.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c22
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c7
-rw-r--r--drivers/input/keyboard/atkbd.c6
-rw-r--r--drivers/input/keyboard/lkkbd.c6
-rw-r--r--drivers/input/keyboard/sunkbd.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c7
-rw-r--r--drivers/input/serio/libps2.c6
-rw-r--r--drivers/isdn/act2000/capi.c4
-rw-r--r--drivers/isdn/act2000/capi.h2
-rw-r--r--drivers/isdn/act2000/module.c18
-rw-r--r--drivers/isdn/capi/kcapi.c14
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c7
-rw-r--r--drivers/isdn/hisax/config.c9
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c5
-rw-r--r--drivers/isdn/hisax/hfc_2bds0.c9
-rw-r--r--drivers/isdn/hisax/hfc_pci.c6
-rw-r--r--drivers/isdn/hisax/hfc_sx.c6
-rw-r--r--drivers/isdn/hisax/icc.c6
-rw-r--r--drivers/isdn/hisax/isac.c6
-rw-r--r--drivers/isdn/hisax/isar.c6
-rw-r--r--drivers/isdn/hisax/isdnl1.c6
-rw-r--r--drivers/isdn/hisax/w6692.c6
-rw-r--r--drivers/isdn/i4l/isdn_net.c6
-rw-r--r--drivers/isdn/pcbit/drv.c4
-rw-r--r--drivers/isdn/pcbit/layer2.c6
-rw-r--r--drivers/isdn/pcbit/pcbit.h2
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/md/dm-crypt.c8
-rw-r--r--drivers/md/dm-mpath.c18
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/dm-snap.c9
-rw-r--r--drivers/md/kcopyd.c4
-rw-r--r--drivers/media/dvb/b2c2/flexcop-pci.c9
-rw-r--r--drivers/media/dvb/cinergyT2/cinergyT2.c18
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c19
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-remote.c7
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/video/cpia_pp.c20
-rw-r--r--drivers/media/video/cx88/cx88-input.c6
-rw-r--r--drivers/media/video/ir-kbd-i2c.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.c13
-rw-r--r--drivers/media/video/saa6588.c6
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c9
-rw-r--r--drivers/message/fusion/mptfc.c14
-rw-r--r--drivers/message/fusion/mptlan.c29
-rw-r--r--drivers/message/fusion/mptsas.c25
-rw-r--r--drivers/message/fusion/mptspi.c14
-rw-r--r--drivers/message/i2o/driver.c2
-rw-r--r--drivers/message/i2o/exec-osm.c13
-rw-r--r--drivers/message/i2o/i2o_block.c15
-rw-r--r--drivers/message/i2o/i2o_block.h2
-rw-r--r--drivers/misc/tifm_7xx1.c18
-rw-r--r--drivers/mmc/mmc.c14
-rw-r--r--drivers/mmc/mmc.h2
-rw-r--r--drivers/mmc/mmc_sysfs.c10
-rw-r--r--drivers/mmc/tifm_sd.c28
-rw-r--r--drivers/net/8139too.c26
-rw-r--r--drivers/net/bnx2.c6
-rw-r--r--drivers/net/cassini.c6
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/cxgb2.c16
-rw-r--r--drivers/net/e100.c8
-rw-r--r--drivers/net/e1000/e1000_main.c10
-rw-r--r--drivers/net/ehea/ehea_main.c9
-rw-r--r--drivers/net/hamradio/baycom_epp.c14
-rw-r--r--drivers/net/irda/mcs7780.c6
-rw-r--r--drivers/net/irda/sir-dev.h2
-rw-r--r--drivers/net/irda/sir_dev.c8
-rw-r--r--drivers/net/iseries_veth.c12
-rw-r--r--drivers/net/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/ns83820.c10
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c12
-rw-r--r--drivers/net/phy/phy.c9
-rw-r--r--drivers/net/plip.c38
-rw-r--r--drivers/net/qla3xxx.c20
-rw-r--r--drivers/net/qla3xxx.h4
-rw-r--r--drivers/net/r8169.c23
-rw-r--r--drivers/net/s2io.c16
-rw-r--r--drivers/net/s2io.h2
-rw-r--r--drivers/net/sis190.c13
-rw-r--r--drivers/net/skge.c15
-rw-r--r--drivers/net/skge.h2
-rw-r--r--drivers/net/spider_net.c9
-rw-r--r--drivers/net/sungem.c6
-rw-r--r--drivers/net/tg3.c6
-rw-r--r--drivers/net/tlan.c23
-rw-r--r--drivers/net/tlan.h1
-rw-r--r--drivers/net/tulip/21142.c7
-rw-r--r--drivers/net/tulip/timer.c7
-rw-r--r--drivers/net/tulip/tulip.h7
-rw-r--r--drivers/net/tulip/tulip_core.c3
-rw-r--r--drivers/net/wan/pc300_tty.c23
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx.h2
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c20
-rw-r--r--drivers/net/wireless/hostap/hostap.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c19
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c21
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c8
-rw-r--r--drivers/net/wireless/ipw2100.c47
-rw-r--r--drivers/net/wireless/ipw2100.h10
-rw-r--r--drivers/net/wireless/ipw2200.c227
-rw-r--r--drivers/net/wireless/ipw2200.h16
-rw-r--r--drivers/net/wireless/orinoco.c28
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c8
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h4
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c5
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c4
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c7
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h2
-rw-r--r--drivers/oprofile/cpu_buffer.c9
-rw-r--r--drivers/oprofile/cpu_buffer.h2
-rw-r--r--drivers/pci/hotplug/shpchp.h4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c2
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c19
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c8
-rw-r--r--drivers/pcmcia/ds.c7
-rw-r--r--drivers/rtc/rtc-dev.c7
-rw-r--r--drivers/scsi/NCR5380.c11
-rw-r--r--drivers/scsi/NCR5380.h4
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/imm.c12
-rw-r--r--drivers/scsi/ipr.c9
-rw-r--r--drivers/scsi/libiscsi.c7
-rw-r--r--drivers/scsi/libsas/sas_discover.c22
-rw-r--r--drivers/scsi/libsas/sas_event.c14
-rw-r--r--drivers/scsi/libsas/sas_init.c6
-rw-r--r--drivers/scsi/libsas/sas_internal.h12
-rw-r--r--drivers/scsi/libsas/sas_phy.c45
-rw-r--r--drivers/scsi/libsas/sas_port.c30
-rw-r--r--drivers/scsi/ppa.c12
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c7
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_fc.c60
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/scsi_transport_spi.c7
-rw-r--r--drivers/spi/spi_bitbang.c7
-rw-r--r--drivers/usb/atm/cxacru.c12
-rw-r--r--drivers/usb/atm/speedtch.c15
-rw-r--r--drivers/usb/atm/ueagle-atm.c6
-rw-r--r--drivers/usb/class/cdc-acm.c6
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/core/message.c7
-rw-r--r--drivers/usb/core/usb.c9
-rw-r--r--drivers/usb/gadget/ether.c6
-rw-r--r--drivers/usb/host/u132-hcd.c62
-rw-r--r--drivers/usb/input/hid-core.c7
-rw-r--r--drivers/usb/misc/ftdi-elan.c86
-rw-r--r--drivers/usb/misc/phidgetkit.c21
-rw-r--r--drivers/usb/misc/phidgetmotorcontrol.c11
-rw-r--r--drivers/usb/net/kaweth.c9
-rw-r--r--drivers/usb/net/pegasus.c6
-rw-r--r--drivers/usb/net/pegasus.h2
-rw-r--r--drivers/usb/net/usbnet.c7
-rw-r--r--drivers/usb/serial/aircable.c13
-rw-r--r--drivers/usb/serial/digi_acceleport.c14
-rw-r--r--drivers/usb/serial/ftdi_sio.c19
-rw-r--r--drivers/usb/serial/keyspan_pda.c22
-rw-r--r--drivers/usb/serial/usb-serial.c7
-rw-r--r--drivers/usb/serial/whiteheat.c15
-rw-r--r--drivers/video/console/fbcon.c6
-rw-r--r--fs/9p/mux.c16
-rw-r--r--fs/aio.c16
-rw-r--r--fs/bio.c6
-rw-r--r--fs/file.c6
-rw-r--r--fs/gfs2/glock.c8
-rw-r--r--fs/ncpfs/inode.c8
-rw-r--r--fs/ncpfs/sock.c20
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/namespace.c8
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4renewd.c5
-rw-r--r--fs/nfsd/nfs4state.c7
-rw-r--r--fs/ocfs2/alloc.c9
-rw-r--r--fs/ocfs2/cluster/heartbeat.c10
-rw-r--r--fs/ocfs2/cluster/quorum.c4
-rw-r--r--fs/ocfs2/cluster/tcp.c78
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h8
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c5
-rw-r--r--fs/ocfs2/dlm/userdlm.c10
-rw-r--r--fs/ocfs2/journal.c7
-rw-r--r--fs/ocfs2/journal.h2
-rw-r--r--fs/ocfs2/ocfs2.h2
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/reiserfs/journal.c12
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c21
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c9
-rw-r--r--include/linux/aio.h2
-rw-r--r--include/linux/connector.h4
-rw-r--r--include/linux/i2o.h2
-rw-r--r--include/linux/kbd_kern.h2
-rw-r--r--include/linux/libata.h7
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/ncp_fs_sb.h8
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/reiserfs_fs_sb.h3
-rw-r--r--include/linux/relay.h2
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h2
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/workqueue.h145
-rw-r--r--include/net/ieee80211softmac.h4
-rw-r--r--include/net/inet_timewait_sock.h2
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/scsi/libsas.h23
-rw-r--r--include/scsi/scsi_transport_fc.h4
-rw-r--r--include/scsi/scsi_transport_iscsi.h2
-rw-r--r--include/sound/ac97_codec.h2
-rw-r--r--include/sound/ak4114.h2
-rw-r--r--ipc/util.c7
-rw-r--r--kernel/kmod.c16
-rw-r--r--kernel/kthread.c13
-rw-r--r--kernel/power/poweroff.c4
-rw-r--r--kernel/relay.c10
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/workqueue.c109
-rw-r--r--mm/slab.c12
-rw-r--r--mm/swap.c4
-rw-r--r--net/atm/lec.c9
-rw-r--r--net/atm/lec.h2
-rw-r--r--net/bluetooth/hci_sysfs.c12
-rw-r--r--net/bridge/br_if.c10
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/core/link_watch.c13
-rw-r--r--net/core/netpoll.c11
-rw-r--r--net/dccp/minisocks.c3
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_assoc.c18
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_auth.c23
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_event.c12
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_module.c4
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_priv.h13
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_scan.c13
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c6
-rw-r--r--net/ipv4/inet_timewait_sock.c5
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c6
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/irda/ircomm/ircomm_tty.c11
-rw-r--r--net/sctp/associola.c11
-rw-r--r--net/sctp/endpointola.c10
-rw-r--r--net/sctp/inqueue.c9
-rw-r--r--net/sunrpc/cache.c8
-rw-r--r--net/sunrpc/rpc_pipe.c8
-rw-r--r--net/sunrpc/sched.c8
-rw-r--r--net/sunrpc/xprt.c7
-rw-r--r--net/sunrpc/xprtsock.c20
-rw-r--r--net/xfrm/xfrm_policy.c8
-rw-r--r--net/xfrm/xfrm_state.c8
-rw-r--r--security/keys/key.c6
-rw-r--r--sound/aoa/aoa-gpio.h2
-rw-r--r--sound/aoa/core/snd-aoa-gpio-feature.c16
-rw-r--r--sound/aoa/core/snd-aoa-gpio-pmf.c16
-rw-r--r--sound/i2c/other/ak4114.c8
-rw-r--r--sound/pci/ac97/ac97_codec.c7
-rw-r--r--sound/pci/hda/hda_codec.c10
-rw-r--r--sound/pci/hda/hda_local.h1
-rw-r--r--sound/ppc/tumbler.c8
339 files changed, 2189 insertions, 1730 deletions
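Every hunk below applies the same mechanical conversion: a work function now receives the struct work_struct pointer itself and recovers its private data with container_of(), instead of being handed a void * context at INIT_WORK() time, and statically declared delayed work moves to DECLARE_DELAYED_WORK()/INIT_DELAYED_WORK(). A minimal sketch of that pattern, using a hypothetical "foo" driver that is not part of this patch:

	/* Illustrative only -- foo_dev is not a driver touched by this commit. */
	#include <linux/workqueue.h>

	struct foo_dev {
		struct work_struct work;	/* work item embedded in the driver state */
		int pending_events;
	};

	/* Old style (removed by this conversion):
	 *	static void foo_work_fn(void *data)
	 *	{
	 *		struct foo_dev *foo = data;
	 *		...
	 *	}
	 *	INIT_WORK(&foo->work, foo_work_fn, foo);
	 */

	/* New style: the work item is the argument; the containing object
	 * is recovered with container_of(). */
	static void foo_work_fn(struct work_struct *work)
	{
		struct foo_dev *foo = container_of(work, struct foo_dev, work);

		/* ... process foo->pending_events ... */
	}

	static void foo_start(struct foo_dev *foo)
	{
		INIT_WORK(&foo->work, foo_work_fn);	/* no context argument any more */
		schedule_work(&foo->work);
	}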
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153ae5b03..6b5d3518a1c0 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
 	}
 }
 
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 {
 	on_each_cpu(mce_checkregs, NULL, 1, 1);
 	schedule_delayed_work(&mce_work, MCE_RATE);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77cd65b..02a9b66b6ac3 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1049,13 +1049,15 @@ void cpu_exit_clear(void)
 
 struct warm_boot_cpu_info {
 	struct completion *complete;
+	struct work_struct task;
 	int apicid;
 	int cpu;
 };
 
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
-	struct warm_boot_cpu_info *info = p;
+	struct warm_boot_cpu_info *info =
+		container_of(work, struct warm_boot_cpu_info, task);
 	do_boot_cpu(info->apicid, info->cpu);
 	complete(info->complete);
 }
@@ -1064,7 +1066,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct warm_boot_cpu_info info;
-	struct work_struct task;
 	int apicid, ret;
 	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
@@ -1089,7 +1090,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 	info.complete = &done;
 	info.apicid = apicid;
 	info.cpu = cpu;
-	INIT_WORK(&task, do_warm_boot_cpu, &info);
+	INIT_WORK(&info.task, do_warm_boot_cpu);
 
 	tsc_sync_disabled = 1;
 
@@ -1097,7 +1098,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
 			KERNEL_PGD_PTRS);
 	flush_tlb_all();
-	schedule_work(&task);
+	schedule_work(&info.task);
 	wait_for_completion(&done);
 
 	tsc_sync_disabled = 0;
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc95828cd74..9810c8c90750 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
 	unsigned int cpu;
 
@@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void)
 {
 	int ret;
 
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 	if (!ret)
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 137077451316..49037edf7d39 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
 /* EEH event workqueue setup. */
 static DEFINE_SPINLOCK(eeh_eventlist_lock);
 LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
 
 /* Serialize reset sequences for a given pci device */
 DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
  * eeh_thread_launcher
  * @dummy - unused
  */
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
 {
 	if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
 		printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea88801d88..c7587fc39015 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
 
 static void mcheck_check_cpu(void *info)
 {
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
 		do_machine_check(NULL, 0);
 }
 
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
 {
 	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
 	schedule_delayed_work(&mcheck_work, check_interval * HZ);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e747af58..9800147c4c68 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	int timeout;
 	unsigned long start_rip;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu = cpu,
 		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
 	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
 	if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	 * thread.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
 
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544d2cfb..9f05bc9b2dad 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
 {
 	unsigned int cpu;
 	for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
 
 static int __init cpufreq_tsc(void)
 {
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
 				       CPUFREQ_TRANSITION_NOTIFIER))
 		cpufreq_init = 1;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 00242111a457..5934c4bfd52a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
  *
  * FIXME! dispatch queue is not a queue at all!
  */
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
 {
-	struct request_queue *q = data;
+	struct as_data *ad = container_of(work, struct as_data, antic_work);
+	struct request_queue *q = ad->q;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q)
 	ad->antic_timer.function = as_antic_timeout;
 	ad->antic_timer.data = (unsigned long)q;
 	init_timer(&ad->antic_timer);
-	INIT_WORK(&ad->antic_work, as_work_handler, q);
+	INIT_WORK(&ad->antic_work, as_work_handler);
 
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e9019ed39b73..84e9be073180 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1840,9 +1840,11 @@ queue_fail:
 	return 1;
 }
 
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	struct cfq_data *cfqd =
+		container_of(work, struct cfq_data, unplug_work);
+	request_queue_t *q = cfqd->queue;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1986,7 +1988,7 @@ static void *cfq_init_queue(request_queue_t *q)
 	cfqd->idle_class_timer.function = cfq_idle_class_timer;
 	cfqd->idle_class_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0f82e12f7b67..cc6e95f8e5d9 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -34,7 +34,7 @@
  */
 #include <scsi/scsi_cmnd.h>
 
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	if (q->unplug_delay == 0)
 		q->unplug_delay = 1;
 
-	INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 	}
 }
 
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	request_queue_t *q = container_of(work, request_queue_t, unplug_work);
 
 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
 			q->rq.count[READ] + q->rq.count[WRITE]);
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c
index 9b5b15601068..2ebffb84f1d9 100644
--- a/crypto/cryptomgr.c
+++ b/crypto/cryptomgr.c
@@ -40,9 +40,10 @@ struct cryptomgr_param {
 	char template[CRYPTO_MAX_ALG_NAME];
 };
 
-static void cryptomgr_probe(void *data)
+static void cryptomgr_probe(struct work_struct *work)
 {
-	struct cryptomgr_param *param = data;
+	struct cryptomgr_param *param =
+		container_of(work, struct cryptomgr_param, work);
 	struct crypto_template *tmpl;
 	struct crypto_instance *inst;
 	int err;
@@ -112,7 +113,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
 	param->larval.type = larval->alg.cra_flags;
 	param->larval.mask = larval->mask;
 
-	INIT_WORK(&param->work, cryptomgr_probe, param);
+	INIT_WORK(&param->work, cryptomgr_probe);
 	schedule_work(&param->work);
 
 	return NOTIFY_STOP;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 068fe4f100b0..02b30ae6a68e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -50,6 +50,7 @@ ACPI_MODULE_NAME("osl")
 struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
 	void *context;
+	struct work_struct work;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
 	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
 }
 
-static void acpi_os_execute_deferred(void *context)
+static void acpi_os_execute_deferred(struct work_struct *work)
 {
-	struct acpi_os_dpc *dpc = NULL;
-
-
-	dpc = (struct acpi_os_dpc *)context;
+	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 	if (!dpc) {
 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
 		return;
@@ -602,7 +600,6 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
-	struct work_struct *task;
 
 	ACPI_FUNCTION_TRACE("os_queue_for_execution");
 
@@ -615,28 +612,22 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 
 	/*
 	 * Allocate/initialize DPC structure. Note that this memory will be
-	 * freed by the callee. The kernel handles the tq_struct list in a
+	 * freed by the callee. The kernel handles the work_struct list in a
 	 * way that allows us to also free its memory inside the callee.
 	 * Because we may want to schedule several tasks with different
 	 * parameters we can't use the approach some kernel code uses of
-	 * having a static tq_struct.
-	 * We can save time and code by allocating the DPC and tq_structs
-	 * from the same memory.
+	 * having a static work_struct.
 	 */
 
-	dpc =
-	    kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
-		    GFP_ATOMIC);
+	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
 	if (!dpc)
 		return_ACPI_STATUS(AE_NO_MEMORY);
 
 	dpc->function = function;
 	dpc->context = context;
 
-	task = (void *)(dpc + 1);
-	INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
-	if (!queue_work(kacpid_wq, task)) {
+	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+	if (!queue_work(kacpid_wq, &dpc->work)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				  "Call to queue_work() failed.\n"));
 		kfree(dpc);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 915a55a6cc14..b5f2da6ac80e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -914,7 +914,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
  * ata_port_queue_task - Queue port_task
  * @ap: The ata_port to queue port_task for
  * @fn: workqueue function to be scheduled
- * @data: data value to pass to workqueue function
+ * @data: data for @fn to use
  * @delay: delay time for workqueue function
  *
  * Schedule @fn(@data) for execution after @delay jiffies using
@@ -929,7 +929,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
  * LOCKING:
  * Inherited from caller.
  */
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
 			 unsigned long delay)
 {
 	int rc;
@@ -937,12 +937,10 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
 		return;
 
-	PREPARE_WORK(&ap->port_task, fn, data);
+	PREPARE_DELAYED_WORK(&ap->port_task, fn);
+	ap->port_task_data = data;
 
-	if (!delay)
-		rc = queue_work(ata_wq, &ap->port_task);
-	else
-		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
 	/* rc == 0 means that another user is using port task */
 	WARN_ON(rc == 0);
@@ -4295,10 +4293,11 @@ fsm_start:
 	return poll_next;
 }
 
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
 {
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, port_task.work);
+	struct ata_queued_cmd *qc = ap->port_task_data;
 	u8 status;
 	int poll_next;
 
@@ -5320,9 +5319,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-	INIT_WORK(&ap->port_task, NULL, NULL);
-	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
-	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+	INIT_DELAYED_WORK(&ap->port_task, NULL);
+	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 02b2b2787d9b..9f6b7cc74fd9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 	if (ap->pflags & ATA_PFLAG_LOADING)
 		ap->pflags &= ~ATA_PFLAG_LOADING;
 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-		queue_work(ata_aux_wq, &ap->hotplug_task);
+		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
 	if (ap->pflags & ATA_PFLAG_RECOVERED)
 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 47ea111d5ace..4c32d93d44b1 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3081,7 +3081,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 
 /**
  * ata_scsi_hotplug - SCSI part of hotplug
- * @data: Pointer to ATA port to perform SCSI hotplug on
+ * @work: Pointer to ATA port to perform SCSI hotplug on
  *
  * Perform SCSI part of hotplug. It's executed from a separate
  * workqueue after EH completes. This is necessary because SCSI
@@ -3091,9 +3091,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
  * LOCKING:
  * Kernel thread context (may sleep).
  */
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, hotplug_task.work);
 	int i;
 
 	if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3192,7 +3193,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 
 /**
  * ata_scsi_dev_rescan - initiate scsi_rescan_device()
- * @data: Pointer to ATA port to perform scsi_rescan_device()
+ * @work: Pointer to ATA port to perform scsi_rescan_device()
  *
  * After ATA pass thru (SAT) commands are executed successfully,
  * libata need to propagate the changes to SCSI layer. This
@@ -3202,9 +3203,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
  * LOCKING:
  * Kernel thread context (may sleep).
  */
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, scsi_rescan_task);
 	struct ata_device *dev;
 	unsigned int i;
 
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0ed263be652a..7e0f3aff873d 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -81,7 +81,7 @@ extern struct scsi_transport_template ata_scsi_transport_template;
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_hotplug(void *data);
+extern void ata_scsi_hotplug(struct work_struct *work);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 				       unsigned int buflen);
 
@@ -111,7 +111,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
 			       unsigned int (*actor) (struct ata_scsi_args *args,
 						      u8 *rbuf, unsigned int buflen));
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
-extern void ata_scsi_dev_rescan(void *data);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 
 /* libata-eh.c */
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 87b17c33b3f9..f40786121948 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -135,7 +135,7 @@ static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos,
 			      int flags);
 static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
 			      char *page);
-static void idt77252_softint(void *dev_id);
+static void idt77252_softint(struct work_struct *work);
 
 
 static struct atmdev_ops idt77252_ops =
@@ -2866,9 +2866,10 @@ out:
 }
 
 static void
-idt77252_softint(void *dev_id)
+idt77252_softint(struct work_struct *work)
 {
-	struct idt77252_dev *card = dev_id;
+	struct idt77252_dev *card =
+		container_of(work, struct idt77252_dev, tqueue);
 	u32 stat;
 	int done;
 
@@ -3697,7 +3698,7 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 	card->pcidev = pcidev;
 	sprintf(card->name, "idt77252-%d", card->index);
 
-	INIT_WORK(&card->tqueue, idt77252_softint, (void *)card);
+	INIT_WORK(&card->tqueue, idt77252_softint);
 
 	membase = pci_resource_start(pcidev, 1);
 	srambase = pci_resource_start(pcidev, 2);
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 6d111228cfac..2308e83e5f33 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -159,7 +159,7 @@ void aoecmd_work(struct aoedev *d);
 void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
 void aoecmd_ata_rsp(struct sk_buff *);
 void aoecmd_cfg_rsp(struct sk_buff *);
-void aoecmd_sleepwork(void *vp);
+void aoecmd_sleepwork(struct work_struct *);
 struct sk_buff *new_skb(ulong);
 
 int aoedev_init(void);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8a13b1af8bab..97f7f535f412 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -408,9 +408,9 @@ rexmit_timer(ulong vp)
 /* this function performs work that has been deferred until sleeping is OK
  */
 void
-aoecmd_sleepwork(void *vp)
+aoecmd_sleepwork(struct work_struct *work)
 {
-	struct aoedev *d = (struct aoedev *) vp;
+	struct aoedev *d = container_of(work, struct aoedev, work);
 
 	if (d->flags & DEVFL_GDALLOC)
 		aoeblk_gdalloc(d);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 6125921bbec4..05a97197c918 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -88,7 +88,7 @@ aoedev_newdev(ulong nframes)
 		kfree(d);
 		return NULL;
 	}
-	INIT_WORK(&d->work, aoecmd_sleepwork, d);
+	INIT_WORK(&d->work, aoecmd_sleepwork);
 	spin_lock_init(&d->lock);
 	init_timer(&d->timer);
 	d->timer.data = (ulong) d;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e6d3a87cbe3..3f1b38276e96 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -992,11 +992,11 @@ static void empty(void)
 {
 }
 
-static DECLARE_WORK(floppy_work, NULL, NULL);
+static DECLARE_WORK(floppy_work, NULL);
 
 static void schedule_bh(void (*handler) (void))
 {
-	PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)handler);
 	schedule_work(&floppy_work);
 }
 
@@ -1008,7 +1008,7 @@ static void cancel_activity(void)
 
 	spin_lock_irqsave(&floppy_lock, flags);
 	do_floppy = NULL;
-	PREPARE_WORK(&floppy_work, (void *)empty, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)empty);
 	del_timer(&fd_timer);
 	spin_unlock_irqrestore(&floppy_lock, flags);
 }
@@ -1868,7 +1868,7 @@ static void show_floppy(void)
 	printk("fdc_busy=%lu\n", fdc_busy);
 	if (do_floppy)
 		printk("do_floppy=%p\n", do_floppy);
-	if (floppy_work.pending)
+	if (work_pending(&floppy_work))
 		printk("floppy_work.func=%p\n", floppy_work.func);
 	if (timer_pending(&fd_timer))
 		printk("fd_timer.function=%p\n", fd_timer.function);
@@ -4498,7 +4498,7 @@ static void floppy_release_irq_and_dma(void)
 		printk("floppy timer still active:%s\n", timeout_message);
 	if (timer_pending(&fd_timer))
 		printk("auxiliary floppy timer still active\n");
-	if (floppy_work.pending)
+	if (work_pending(&floppy_work))
 		printk("work still pending\n");
 #endif
 	old_fdc = fdc;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 40a11e567970..9d9bff23f426 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -352,19 +352,19 @@ static enum action (*phase)(void);
 
 static void run_fsm(void);
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
-static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
 
 static void schedule_fsm(void)
 {
 	if (!nice)
-		schedule_work(&fsm_tq);
+		schedule_delayed_work(&fsm_tq, 0);
 	else
 		schedule_delayed_work(&fsm_tq, nice-1);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
 	run_fsm();
 }
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
index 932342d7a8eb..bc3703294143 100644
--- a/drivers/block/paride/pseudo.h
+++ b/drivers/block/paride/pseudo.h
@@ -35,7 +35,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
 static void (* ps_continuation)(void);
 static int (* ps_ready)(void);
@@ -45,7 +45,7 @@ static int ps_nice = 0;
 
 static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
 
-static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
 
 static void ps_set_intr(void (*continuation)(void),
 			int (*ready)(void),
@@ -63,14 +63,14 @@ static void ps_set_intr(void (*continuation)(void),
 	if (!ps_tq_active) {
 		ps_tq_active = 1;
 		if (!ps_nice)
-			schedule_work(&ps_tq);
+			schedule_delayed_work(&ps_tq, 0);
 		else
 			schedule_delayed_work(&ps_tq, ps_nice-1);
 	}
 	spin_unlock_irqrestore(&ps_spinlock,flags);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
 	void (*con)(void);
 	unsigned long flags;
@@ -92,7 +92,7 @@ static void ps_tq_int(void *data)
 	}
 	ps_tq_active = 1;
 	if (!ps_nice)
-		schedule_work(&ps_tq);
+		schedule_delayed_work(&ps_tq, 0);
 	else
 		schedule_delayed_work(&ps_tq, ps_nice-1);
 	spin_unlock_irqrestore(&ps_spinlock,flags);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 47d6975268ff..54509eb3391b 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1244,9 +1244,10 @@ out:
 	return IRQ_RETVAL(handled);
 }
 
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
 {
-	struct carm_host *host = _data;
+	struct carm_host *host =
+		container_of(work, struct carm_host, fsm_task);
 	unsigned long flags;
 	unsigned int state;
 	int rc, i, next_dev;
@@ -1619,7 +1620,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	host->pdev = pdev;
 	host->flags = pci_dac ? FL_DAC : 0;
 	spin_lock_init(&host->lock);
-	INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+	INIT_WORK(&host->fsm_task, carm_fsm_task);
 	init_completion(&host->probe_comp);
 
 	for (i = 0; i < ARRAY_SIZE(host->req); i++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 0d5c73f07265..2098eff91e14 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -376,7 +376,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 			 int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
 static void ub_reset_enter(struct ub_dev *sc, int try);
-static void ub_reset_task(void *arg);
+static void ub_reset_task(struct work_struct *work);
 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
 			    struct ub_capacity *ret);
@@ -1558,9 +1558,9 @@ static void ub_reset_enter(struct ub_dev *sc, int try)
 	schedule_work(&sc->reset_work);
 }
 
-static void ub_reset_task(void *arg)
+static void ub_reset_task(struct work_struct *work)
 {
-	struct ub_dev *sc = arg;
+	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
 	unsigned long flags;
 	struct list_head *p;
 	struct ub_lun *lun;
@@ -2179,7 +2179,7 @@ static int ub_probe(struct usb_interface *intf,
 	usb_init_urb(&sc->work_urb);
 	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
 	atomic_set(&sc->poison, 0);
-	INIT_WORK(&sc->reset_work, ub_reset_task, sc);
+	INIT_WORK(&sc->reset_work, ub_reset_task);
 	init_waitqueue_head(&sc->reset_wait);
 
 	init_timer(&sc->work_timer);
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 516751754aa9..9256985cbe36 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -157,9 +157,10 @@ static void bcm203x_complete(struct urb *urb)
 	}
 }
 
-static void bcm203x_work(void *user_data)
+static void bcm203x_work(struct work_struct *work)
 {
-	struct bcm203x_data *data = user_data;
+	struct bcm203x_data *data =
+		container_of(work, struct bcm203x_data, work);
 
 	if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
 		BT_ERR("Can't submit URB");
@@ -246,7 +247,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
 
 	release_firmware(firmware);
 
-	INIT_WORK(&data->work, bcm203x_work, (void *) data);
+	INIT_WORK(&data->work, bcm203x_work);
 
 	usb_set_intfdata(intf, data);
 
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e608dadece2f..acb2de5e3a98 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -926,9 +926,10 @@ cy_sched_event(struct cyclades_port *info, int event)
  * had to poll every port to see if that port needed servicing.
  */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
 {
-	struct cyclades_port *info = (struct cyclades_port *) private_;
+	struct cyclades_port *info =
+		container_of(work, struct cyclades_port, tqueue);
 	struct tty_struct *tty;
 
 	tty = info->tty;
@@ -5328,7 +5329,7 @@ cy_init(void)
 		info->blocked_open = 0;
 		info->default_threshold = 0;
 		info->default_timeout = 0;
-		INIT_WORK(&info->tqueue, do_softint, info);
+		INIT_WORK(&info->tqueue, do_softint);
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
 		init_waitqueue_head(&info->shutdown_wait);
@@ -5403,7 +5404,7 @@ cy_init(void)
 		info->blocked_open = 0;
 		info->default_threshold = 0;
 		info->default_timeout = 0;
-		INIT_WORK(&info->tqueue, do_softint, info);
+		INIT_WORK(&info->tqueue, do_softint);
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
 		init_waitqueue_head(&info->shutdown_wait);
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 60c1695db300..806f9ce5f47b 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -500,9 +500,9 @@ via_dmablit_timer(unsigned long data)
 
 
 static void
-via_dmablit_workqueue(void *data)
+via_dmablit_workqueue(struct work_struct *work)
 {
-	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
 	drm_device_t *dev = blitq->dev;
 	unsigned long irqsave;
 	drm_via_sg_info_t *cur_sg;
@@ -571,7 +571,7 @@ via_init_dmablit(drm_device_t *dev)
 			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
 		}
 		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
-		INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
 		init_timer(&blitq->poll_timer);
 		blitq->poll_timer.function = &via_dmablit_timer;
 		blitq->poll_timer.data = (unsigned long) blitq;
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 706733c0b36a..7c71eb779802 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -200,7 +200,7 @@ static int pc_ioctl(struct tty_struct *, struct file *,
200static int info_ioctl(struct tty_struct *, struct file *, 200static int info_ioctl(struct tty_struct *, struct file *,
201 unsigned int, unsigned long); 201 unsigned int, unsigned long);
202static void pc_set_termios(struct tty_struct *, struct termios *); 202static void pc_set_termios(struct tty_struct *, struct termios *);
203static void do_softint(void *); 203static void do_softint(struct work_struct *work);
204static void pc_stop(struct tty_struct *); 204static void pc_stop(struct tty_struct *);
205static void pc_start(struct tty_struct *); 205static void pc_start(struct tty_struct *);
206static void pc_throttle(struct tty_struct * tty); 206static void pc_throttle(struct tty_struct * tty);
@@ -1505,7 +1505,7 @@ static void post_fep_init(unsigned int crd)
1505 1505
1506 ch->brdchan = bc; 1506 ch->brdchan = bc;
1507 ch->mailbox = gd; 1507 ch->mailbox = gd;
1508 INIT_WORK(&ch->tqueue, do_softint, ch); 1508 INIT_WORK(&ch->tqueue, do_softint);
1509 ch->board = &boards[crd]; 1509 ch->board = &boards[crd];
1510 1510
1511 spin_lock_irqsave(&epca_lock, flags); 1511 spin_lock_irqsave(&epca_lock, flags);
@@ -2566,9 +2566,9 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
2566 2566
2567/* --------------------- Begin do_softint ----------------------- */ 2567/* --------------------- Begin do_softint ----------------------- */
2568 2568
2569static void do_softint(void *private_) 2569static void do_softint(struct work_struct *work)
2570{ /* Begin do_softint */ 2570{ /* Begin do_softint */
2571 struct channel *ch = (struct channel *) private_; 2571 struct channel *ch = container_of(work, struct channel, tqueue);
2572 /* Called in response to a modem change event */ 2572 /* Called in response to a modem change event */
2573 if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ 2573 if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */
2574 struct tty_struct *tty = ch->tty; 2574 struct tty_struct *tty = ch->tty;
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 15a4ea896328..93b551962513 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -723,9 +723,10 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
723 * ------------------------------------------------------------------- 723 * -------------------------------------------------------------------
724 */ 724 */
725 725
726static void do_softint(void *private_) 726static void do_softint(struct work_struct *work)
727{ 727{
728 struct esp_struct *info = (struct esp_struct *) private_; 728 struct esp_struct *info =
729 container_of(work, struct esp_struct, tqueue);
729 struct tty_struct *tty; 730 struct tty_struct *tty;
730 731
731 tty = info->tty; 732 tty = info->tty;
@@ -746,9 +747,10 @@ static void do_softint(void *private_)
746 * do_serial_hangup() -> tty->hangup() -> esp_hangup() 747 * do_serial_hangup() -> tty->hangup() -> esp_hangup()
747 * 748 *
748 */ 749 */
749static void do_serial_hangup(void *private_) 750static void do_serial_hangup(struct work_struct *work)
750{ 751{
751 struct esp_struct *info = (struct esp_struct *) private_; 752 struct esp_struct *info =
753 container_of(work, struct esp_struct, tqueue_hangup);
752 struct tty_struct *tty; 754 struct tty_struct *tty;
753 755
754 tty = info->tty; 756 tty = info->tty;
@@ -2501,8 +2503,8 @@ static int __init espserial_init(void)
2501 info->magic = ESP_MAGIC; 2503 info->magic = ESP_MAGIC;
2502 info->close_delay = 5*HZ/10; 2504 info->close_delay = 5*HZ/10;
2503 info->closing_wait = 30*HZ; 2505 info->closing_wait = 30*HZ;
2504 INIT_WORK(&info->tqueue, do_softint, info); 2506 INIT_WORK(&info->tqueue, do_softint);
2505 INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info); 2507 INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
2506 info->config.rx_timeout = rx_timeout; 2508 info->config.rx_timeout = rx_timeout;
2507 info->config.flow_on = flow_on; 2509 info->config.flow_on = flow_on;
2508 info->config.flow_off = flow_off; 2510 info->config.flow_off = flow_off;
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 817dc409ac20..23b25ada65ea 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -102,7 +102,7 @@ static void gen_rtc_interrupt(unsigned long arg);
102 * Routine to poll RTC seconds field for change as often as possible, 102 * Routine to poll RTC seconds field for change as often as possible,
103 * after first RTC_UIE use timer to reduce polling 103 * after first RTC_UIE use timer to reduce polling
104 */ 104 */
105static void genrtc_troutine(void *data) 105static void genrtc_troutine(struct work_struct *work)
106{ 106{
107 unsigned int tmp = get_rtc_ss(); 107 unsigned int tmp = get_rtc_ss();
108 108
@@ -255,7 +255,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit)
255 irq_active = 1; 255 irq_active = 1;
256 stop_rtc_timers = 0; 256 stop_rtc_timers = 0;
257 lostint = 0; 257 lostint = 0;
258 INIT_WORK(&genrtc_task, genrtc_troutine, NULL); 258 INIT_WORK(&genrtc_task, genrtc_troutine);
259 oldsecs = get_rtc_ss(); 259 oldsecs = get_rtc_ss();
260 init_timer(&timer_task); 260 init_timer(&timer_task);
261 261
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 2cf63e7305a3..82a41d5b4ed0 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -69,7 +69,7 @@
69#define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) 69#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
70 70
71struct hvsi_struct { 71struct hvsi_struct {
72 struct work_struct writer; 72 struct delayed_work writer;
73 struct work_struct handshaker; 73 struct work_struct handshaker;
74 wait_queue_head_t emptyq; /* woken when outbuf is emptied */ 74 wait_queue_head_t emptyq; /* woken when outbuf is emptied */
75 wait_queue_head_t stateq; /* woken when HVSI state changes */ 75 wait_queue_head_t stateq; /* woken when HVSI state changes */
@@ -744,9 +744,10 @@ static int hvsi_handshake(struct hvsi_struct *hp)
744 return 0; 744 return 0;
745} 745}
746 746
747static void hvsi_handshaker(void *arg) 747static void hvsi_handshaker(struct work_struct *work)
748{ 748{
749 struct hvsi_struct *hp = (struct hvsi_struct *)arg; 749 struct hvsi_struct *hp =
750 container_of(work, struct hvsi_struct, handshaker);
750 751
751 if (hvsi_handshake(hp) >= 0) 752 if (hvsi_handshake(hp) >= 0)
752 return; 753 return;
@@ -951,9 +952,10 @@ static void hvsi_push(struct hvsi_struct *hp)
951} 952}
952 953
953/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ 954/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
954static void hvsi_write_worker(void *arg) 955static void hvsi_write_worker(struct work_struct *work)
955{ 956{
956 struct hvsi_struct *hp = (struct hvsi_struct *)arg; 957 struct hvsi_struct *hp =
958 container_of(work, struct hvsi_struct, writer.work);
957 unsigned long flags; 959 unsigned long flags;
958#ifdef DEBUG 960#ifdef DEBUG
959 static long start_j = 0; 961 static long start_j = 0;
@@ -1287,8 +1289,8 @@ static int __init hvsi_console_init(void)
1287 } 1289 }
1288 1290
1289 hp = &hvsi_ports[hvsi_count]; 1291 hp = &hvsi_ports[hvsi_count];
1290 INIT_WORK(&hp->writer, hvsi_write_worker, hp); 1292 INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
1291 INIT_WORK(&hp->handshaker, hvsi_handshaker, hp); 1293 INIT_WORK(&hp->handshaker, hvsi_handshaker);
1292 init_waitqueue_head(&hp->emptyq); 1294 init_waitqueue_head(&hp->emptyq);
1293 init_waitqueue_head(&hp->stateq); 1295 init_waitqueue_head(&hp->stateq);
1294 spin_lock_init(&hp->lock); 1296 spin_lock_init(&hp->lock);
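In the hvsi conversion above the writer member also changes type, from work_struct to delayed_work, because it is only ever scheduled with a delay. A delayed_work embeds a work_struct next to its timer, so the handler reaches the container through the embedded member ("writer.work") in container_of(). Sketch with an invented struct foo_port, not this driver's real layout:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct foo_port {
        struct delayed_work writer;             /* was: struct work_struct */
        struct work_struct handshaker;
};

static void foo_write_worker(struct work_struct *work)
{
        /* delayed_work wraps a work_struct, hence the .work step */
        struct foo_port *port =
                container_of(work, struct foo_port, writer.work);

        /* ... push pending output, then reschedule until drained ... */
        schedule_delayed_work(&port->writer, HZ / 10);
}

static void foo_handshaker(struct work_struct *work)
{
        /* plain work_struct: container_of() on the member itself */
        struct foo_port *port =
                container_of(work, struct foo_port, handshaker);
        (void)port;
}

static void foo_port_init(struct foo_port *port)
{
        INIT_DELAYED_WORK(&port->writer, foo_write_worker);
        INIT_WORK(&port->handshaker, foo_handshaker);
}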
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 54d93f0345e8..c213fdbdb2b0 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -84,8 +84,8 @@ static void iiSendPendingMail(i2eBordStrPtr);
84static void serviceOutgoingFifo(i2eBordStrPtr); 84static void serviceOutgoingFifo(i2eBordStrPtr);
85 85
86// Functions defined in ip2.c as part of interrupt handling 86// Functions defined in ip2.c as part of interrupt handling
87static void do_input(void *); 87static void do_input(struct work_struct *);
88static void do_status(void *); 88static void do_status(struct work_struct *);
89 89
90//*************** 90//***************
91//* Debug Data * 91//* Debug Data *
@@ -331,8 +331,8 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
331 pCh->ClosingWaitTime = 30*HZ; 331 pCh->ClosingWaitTime = 30*HZ;
332 332
333 // Initialize task queue objects 333 // Initialize task queue objects
334 INIT_WORK(&pCh->tqueue_input, do_input, pCh); 334 INIT_WORK(&pCh->tqueue_input, do_input);
335 INIT_WORK(&pCh->tqueue_status, do_status, pCh); 335 INIT_WORK(&pCh->tqueue_status, do_status);
336 336
337#ifdef IP2DEBUG_TRACE 337#ifdef IP2DEBUG_TRACE
338 pCh->trace = ip2trace; 338 pCh->trace = ip2trace;
@@ -1573,7 +1573,7 @@ i2StripFifo(i2eBordStrPtr pB)
1573#ifdef USE_IQ 1573#ifdef USE_IQ
1574 schedule_work(&pCh->tqueue_input); 1574 schedule_work(&pCh->tqueue_input);
1575#else 1575#else
1576 do_input(pCh); 1576 do_input(&pCh->tqueue_input);
1577#endif 1577#endif
1578 1578
1579 // Note we do not need to maintain any flow-control credits at this 1579 // Note we do not need to maintain any flow-control credits at this
@@ -1810,7 +1810,7 @@ i2StripFifo(i2eBordStrPtr pB)
1810#ifdef USE_IQ 1810#ifdef USE_IQ
1811 schedule_work(&pCh->tqueue_status); 1811 schedule_work(&pCh->tqueue_status);
1812#else 1812#else
1813 do_status(pCh); 1813 do_status(&pCh->tqueue_status);
1814#endif 1814#endif
1815 } 1815 }
1816 } 1816 }
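The !USE_IQ branches above show a second consequence of the new prototype: a handler that is sometimes invoked directly must now be passed the address of the embedded work item, not the object pointer, so that its container_of() still resolves. Sketch with invented names:

#include <linux/workqueue.h>

struct foo_chan {
        struct work_struct tqueue_input;
};

static void foo_do_input(struct work_struct *work)
{
        struct foo_chan *ch = container_of(work, struct foo_chan, tqueue_input);

        /* ... drain the input FIFO for ch ... */
        (void)ch;
}

static void foo_rx_ready(struct foo_chan *ch, int defer)
{
        if (defer)
                schedule_work(&ch->tqueue_input);       /* run later, process context */
        else
                foo_do_input(&ch->tqueue_input);        /* old code passed plain 'ch' */
}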
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index a3f32d46d2f8..cda2459c1d60 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -189,12 +189,12 @@ static int ip2_tiocmset(struct tty_struct *tty, struct file *file,
189 unsigned int set, unsigned int clear); 189 unsigned int set, unsigned int clear);
190 190
191static void set_irq(int, int); 191static void set_irq(int, int);
192static void ip2_interrupt_bh(i2eBordStrPtr pB); 192static void ip2_interrupt_bh(struct work_struct *work);
193static irqreturn_t ip2_interrupt(int irq, void *dev_id); 193static irqreturn_t ip2_interrupt(int irq, void *dev_id);
194static void ip2_poll(unsigned long arg); 194static void ip2_poll(unsigned long arg);
195static inline void service_all_boards(void); 195static inline void service_all_boards(void);
196static void do_input(void *p); 196static void do_input(struct work_struct *);
197static void do_status(void *p); 197static void do_status(struct work_struct *);
198 198
199static void ip2_wait_until_sent(PTTY,int); 199static void ip2_wait_until_sent(PTTY,int);
200 200
@@ -918,7 +918,7 @@ ip2_init_board( int boardnum )
918 pCh++; 918 pCh++;
919 } 919 }
920ex_exit: 920ex_exit:
921 INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB); 921 INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
922 return; 922 return;
923 923
924err_release_region: 924err_release_region:
@@ -1125,8 +1125,8 @@ service_all_boards(void)
1125 1125
1126 1126
1127/******************************************************************************/ 1127/******************************************************************************/
1128/* Function: ip2_interrupt_bh(pB) */ 1128/* Function: ip2_interrupt_bh(work) */
1129/* Parameters: pB - pointer to the board structure */ 1129/* Parameters: work - pointer to the board structure */
1130/* Returns: Nothing */ 1130/* Returns: Nothing */
1131/* */ 1131/* */
1132/* Description: */ 1132/* Description: */
@@ -1135,8 +1135,9 @@ service_all_boards(void)
1135/* */ 1135/* */
1136/******************************************************************************/ 1136/******************************************************************************/
1137static void 1137static void
1138ip2_interrupt_bh(i2eBordStrPtr pB) 1138ip2_interrupt_bh(struct work_struct *work)
1139{ 1139{
1140 i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
1140// pB better well be set or we have a problem! We can only get 1141// pB better well be set or we have a problem! We can only get
1141// here from the IMMEDIATE queue. Here, we process the boards. 1142// here from the IMMEDIATE queue. Here, we process the boards.
1142// Checking pB doesn't cost much and it saves us from the sanity checkers. 1143// Checking pB doesn't cost much and it saves us from the sanity checkers.
@@ -1245,9 +1246,9 @@ ip2_poll(unsigned long arg)
1245 ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 ); 1246 ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
1246} 1247}
1247 1248
1248static void do_input(void *p) 1249static void do_input(struct work_struct *work)
1249{ 1250{
1250 i2ChanStrPtr pCh = p; 1251 i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
1251 unsigned long flags; 1252 unsigned long flags;
1252 1253
1253 ip2trace(CHANN, ITRC_INPUT, 21, 0 ); 1254 ip2trace(CHANN, ITRC_INPUT, 21, 0 );
@@ -1279,9 +1280,9 @@ static inline void isig(int sig, struct tty_struct *tty, int flush)
1279 } 1280 }
1280} 1281}
1281 1282
1282static void do_status(void *p) 1283static void do_status(struct work_struct *work)
1283{ 1284{
1284 i2ChanStrPtr pCh = p; 1285 i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
1285 int status; 1286 int status;
1286 1287
1287 status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) ); 1288 status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 58c955e390b3..1637c1d9a4ba 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -530,9 +530,9 @@ sched_again:
530/* Interrupt handlers */ 530/* Interrupt handlers */
531 531
532 532
533static void isicom_bottomhalf(void *data) 533static void isicom_bottomhalf(struct work_struct *work)
534{ 534{
535 struct isi_port *port = (struct isi_port *) data; 535 struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
536 struct tty_struct *tty = port->tty; 536 struct tty_struct *tty = port->tty;
537 537
538 if (!tty) 538 if (!tty)
@@ -1474,9 +1474,9 @@ static void isicom_start(struct tty_struct *tty)
1474} 1474}
1475 1475
1476/* hangup et all */ 1476/* hangup et all */
1477static void do_isicom_hangup(void *data) 1477static void do_isicom_hangup(struct work_struct *work)
1478{ 1478{
1479 struct isi_port *port = data; 1479 struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
1480 struct tty_struct *tty; 1480 struct tty_struct *tty;
1481 1481
1482 tty = port->tty; 1482 tty = port->tty;
@@ -1966,8 +1966,8 @@ static int __devinit isicom_setup(void)
1966 port->channel = channel; 1966 port->channel = channel;
1967 port->close_delay = 50 * HZ/100; 1967 port->close_delay = 50 * HZ/100;
1968 port->closing_wait = 3000 * HZ/100; 1968 port->closing_wait = 3000 * HZ/100;
1969 INIT_WORK(&port->hangup_tq, do_isicom_hangup, port); 1969 INIT_WORK(&port->hangup_tq, do_isicom_hangup);
1970 INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port); 1970 INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
1971 port->status = 0; 1971 port->status = 0;
1972 init_waitqueue_head(&port->open_wait); 1972 init_waitqueue_head(&port->open_wait);
1973 init_waitqueue_head(&port->close_wait); 1973 init_waitqueue_head(&port->close_wait);
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 96cb1f07332b..2d025a9fd14d 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -222,7 +222,7 @@ static struct semaphore moxaBuffSem;
222/* 222/*
223 * static functions: 223 * static functions:
224 */ 224 */
225static void do_moxa_softint(void *); 225static void do_moxa_softint(struct work_struct *);
226static int moxa_open(struct tty_struct *, struct file *); 226static int moxa_open(struct tty_struct *, struct file *);
227static void moxa_close(struct tty_struct *, struct file *); 227static void moxa_close(struct tty_struct *, struct file *);
228static int moxa_write(struct tty_struct *, const unsigned char *, int); 228static int moxa_write(struct tty_struct *, const unsigned char *, int);
@@ -363,7 +363,7 @@ static int __init moxa_init(void)
363 for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) { 363 for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
364 ch->type = PORT_16550A; 364 ch->type = PORT_16550A;
365 ch->port = i; 365 ch->port = i;
366 INIT_WORK(&ch->tqueue, do_moxa_softint, ch); 366 INIT_WORK(&ch->tqueue, do_moxa_softint);
367 ch->tty = NULL; 367 ch->tty = NULL;
368 ch->close_delay = 5 * HZ / 10; 368 ch->close_delay = 5 * HZ / 10;
369 ch->closing_wait = 30 * HZ; 369 ch->closing_wait = 30 * HZ;
@@ -509,9 +509,9 @@ static void __exit moxa_exit(void)
509module_init(moxa_init); 509module_init(moxa_init);
510module_exit(moxa_exit); 510module_exit(moxa_exit);
511 511
512static void do_moxa_softint(void *private_) 512static void do_moxa_softint(struct work_struct *work)
513{ 513{
514 struct moxa_str *ch = (struct moxa_str *) private_; 514 struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
515 struct tty_struct *tty; 515 struct tty_struct *tty;
516 516
517 if (ch && (tty = ch->tty)) { 517 if (ch && (tty = ch->tty)) {
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 048d91142c17..5ed2486b7581 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -389,7 +389,7 @@ static int mxser_init(void);
389/* static void mxser_poll(unsigned long); */ 389/* static void mxser_poll(unsigned long); */
390static int mxser_get_ISA_conf(int, struct mxser_hwconf *); 390static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
391static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *); 391static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
392static void mxser_do_softint(void *); 392static void mxser_do_softint(struct work_struct *);
393static int mxser_open(struct tty_struct *, struct file *); 393static int mxser_open(struct tty_struct *, struct file *);
394static void mxser_close(struct tty_struct *, struct file *); 394static void mxser_close(struct tty_struct *, struct file *);
395static int mxser_write(struct tty_struct *, const unsigned char *, int); 395static int mxser_write(struct tty_struct *, const unsigned char *, int);
@@ -590,7 +590,7 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
590 info->custom_divisor = hwconf->baud_base[i] * 16; 590 info->custom_divisor = hwconf->baud_base[i] * 16;
591 info->close_delay = 5 * HZ / 10; 591 info->close_delay = 5 * HZ / 10;
592 info->closing_wait = 30 * HZ; 592 info->closing_wait = 30 * HZ;
593 INIT_WORK(&info->tqueue, mxser_do_softint, info); 593 INIT_WORK(&info->tqueue, mxser_do_softint);
594 info->normal_termios = mxvar_sdriver->init_termios; 594 info->normal_termios = mxvar_sdriver->init_termios;
595 init_waitqueue_head(&info->open_wait); 595 init_waitqueue_head(&info->open_wait);
596 init_waitqueue_head(&info->close_wait); 596 init_waitqueue_head(&info->close_wait);
@@ -917,9 +917,10 @@ static int mxser_init(void)
917 return 0; 917 return 0;
918} 918}
919 919
920static void mxser_do_softint(void *private_) 920static void mxser_do_softint(struct work_struct *work)
921{ 921{
922 struct mxser_struct *info = private_; 922 struct mxser_struct *info =
923 container_of(work, struct mxser_struct, tqueue);
923 struct tty_struct *tty; 924 struct tty_struct *tty;
924 925
925 tty = info->tty; 926 tty = info->tty;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 1a0bc30b79d1..e4d950072b5a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -421,7 +421,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id);
421/* 421/*
422 * Bottom half interrupt handlers 422 * Bottom half interrupt handlers
423 */ 423 */
424static void bh_handler(void* Context); 424static void bh_handler(struct work_struct *work);
425static void bh_transmit(MGSLPC_INFO *info); 425static void bh_transmit(MGSLPC_INFO *info);
426static void bh_status(MGSLPC_INFO *info); 426static void bh_status(MGSLPC_INFO *info);
427 427
@@ -547,7 +547,7 @@ static int mgslpc_probe(struct pcmcia_device *link)
547 547
548 memset(info, 0, sizeof(MGSLPC_INFO)); 548 memset(info, 0, sizeof(MGSLPC_INFO));
549 info->magic = MGSLPC_MAGIC; 549 info->magic = MGSLPC_MAGIC;
550 INIT_WORK(&info->task, bh_handler, info); 550 INIT_WORK(&info->task, bh_handler);
551 info->max_frame_size = 4096; 551 info->max_frame_size = 4096;
552 info->close_delay = 5*HZ/10; 552 info->close_delay = 5*HZ/10;
553 info->closing_wait = 30*HZ; 553 info->closing_wait = 30*HZ;
@@ -842,9 +842,9 @@ static int bh_action(MGSLPC_INFO *info)
842 return rc; 842 return rc;
843} 843}
844 844
845static void bh_handler(void* Context) 845static void bh_handler(struct work_struct *work)
846{ 846{
847 MGSLPC_INFO *info = (MGSLPC_INFO*)Context; 847 MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
848 int action; 848 int action;
849 849
850 if (!info) 850 if (!info)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d40df30c2b10..4c6782a1ecdb 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1422,9 +1422,9 @@ static struct keydata {
1422 1422
1423static unsigned int ip_cnt; 1423static unsigned int ip_cnt;
1424 1424
1425static void rekey_seq_generator(void *private_); 1425static void rekey_seq_generator(struct work_struct *work);
1426 1426
1427static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL); 1427static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
1428 1428
1429/* 1429/*
1430 * Lock avoidance: 1430 * Lock avoidance:
@@ -1438,7 +1438,7 @@ static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
1438 * happen, and even if that happens only a not perfectly compliant 1438 * happen, and even if that happens only a not perfectly compliant
1439 * ISN is generated, nothing fatal. 1439 * ISN is generated, nothing fatal.
1440 */ 1440 */
1441static void rekey_seq_generator(void *private_) 1441static void rekey_seq_generator(struct work_struct *work)
1442{ 1442{
1443 struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; 1443 struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
1444 1444
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index c084149153de..fc87070f1866 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -765,7 +765,7 @@ static void sonypi_setbluetoothpower(u8 state)
765 sonypi_device.bluetooth_power = state; 765 sonypi_device.bluetooth_power = state;
766} 766}
767 767
768static void input_keyrelease(void *data) 768static void input_keyrelease(struct work_struct *work)
769{ 769{
770 struct sonypi_keypress kp; 770 struct sonypi_keypress kp;
771 771
@@ -1412,7 +1412,7 @@ static int __devinit sonypi_probe(struct platform_device *dev)
1412 goto err_inpdev_unregister; 1412 goto err_inpdev_unregister;
1413 } 1413 }
1414 1414
1415 INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL); 1415 INIT_WORK(&sonypi_device.input_work, input_keyrelease);
1416 } 1416 }
1417 1417
1418 sonypi_enable(0); 1418 sonypi_enable(0);
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 7e1bd9562c2a..99137ab66b62 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2261,9 +2261,10 @@ static void sx_start(struct tty_struct * tty)
2261 * do_sx_hangup() -> tty->hangup() -> sx_hangup() 2261 * do_sx_hangup() -> tty->hangup() -> sx_hangup()
2262 * 2262 *
2263 */ 2263 */
2264static void do_sx_hangup(void *private_) 2264static void do_sx_hangup(struct work_struct *work)
2265{ 2265{
2266 struct specialix_port *port = (struct specialix_port *) private_; 2266 struct specialix_port *port =
2267 container_of(work, struct specialix_port, tqueue_hangup);
2267 struct tty_struct *tty; 2268 struct tty_struct *tty;
2268 2269
2269 func_enter(); 2270 func_enter();
@@ -2336,9 +2337,10 @@ static void sx_set_termios(struct tty_struct * tty, struct termios * old_termios
2336} 2337}
2337 2338
2338 2339
2339static void do_softint(void *private_) 2340static void do_softint(struct work_struct *work)
2340{ 2341{
2341 struct specialix_port *port = (struct specialix_port *) private_; 2342 struct specialix_port *port =
2343 container_of(work, struct specialix_port, tqueue);
2342 struct tty_struct *tty; 2344 struct tty_struct *tty;
2343 2345
2344 func_enter(); 2346 func_enter();
@@ -2411,8 +2413,8 @@ static int sx_init_drivers(void)
2411 memset(sx_port, 0, sizeof(sx_port)); 2413 memset(sx_port, 0, sizeof(sx_port));
2412 for (i = 0; i < SX_NPORT * SX_NBOARD; i++) { 2414 for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
2413 sx_port[i].magic = SPECIALIX_MAGIC; 2415 sx_port[i].magic = SPECIALIX_MAGIC;
2414 INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]); 2416 INIT_WORK(&sx_port[i].tqueue, do_softint);
2415 INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]); 2417 INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
2416 sx_port[i].close_delay = 50 * HZ/100; 2418 sx_port[i].close_delay = 50 * HZ/100;
2417 sx_port[i].closing_wait = 3000 * HZ/100; 2419 sx_port[i].closing_wait = 3000 * HZ/100;
2418 init_waitqueue_head(&sx_port[i].open_wait); 2420 init_waitqueue_head(&sx_port[i].open_wait);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 06784adcc35c..147c30da81ea 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -802,7 +802,7 @@ static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, u
802/* 802/*
803 * Bottom half interrupt handlers 803 * Bottom half interrupt handlers
804 */ 804 */
805static void mgsl_bh_handler(void* Context); 805static void mgsl_bh_handler(struct work_struct *work);
806static void mgsl_bh_receive(struct mgsl_struct *info); 806static void mgsl_bh_receive(struct mgsl_struct *info);
807static void mgsl_bh_transmit(struct mgsl_struct *info); 807static void mgsl_bh_transmit(struct mgsl_struct *info);
808static void mgsl_bh_status(struct mgsl_struct *info); 808static void mgsl_bh_status(struct mgsl_struct *info);
@@ -1071,9 +1071,10 @@ static int mgsl_bh_action(struct mgsl_struct *info)
1071/* 1071/*
1072 * Perform bottom half processing of work items queued by ISR. 1072 * Perform bottom half processing of work items queued by ISR.
1073 */ 1073 */
1074static void mgsl_bh_handler(void* Context) 1074static void mgsl_bh_handler(struct work_struct *work)
1075{ 1075{
1076 struct mgsl_struct *info = (struct mgsl_struct*)Context; 1076 struct mgsl_struct *info =
1077 container_of(work, struct mgsl_struct, task);
1077 int action; 1078 int action;
1078 1079
1079 if (!info) 1080 if (!info)
@@ -4337,7 +4338,7 @@ static struct mgsl_struct* mgsl_allocate_device(void)
4337 } else { 4338 } else {
4338 memset(info, 0, sizeof(struct mgsl_struct)); 4339 memset(info, 0, sizeof(struct mgsl_struct));
4339 info->magic = MGSL_MAGIC; 4340 info->magic = MGSL_MAGIC;
4340 INIT_WORK(&info->task, mgsl_bh_handler, info); 4341 INIT_WORK(&info->task, mgsl_bh_handler);
4341 info->max_frame_size = 4096; 4342 info->max_frame_size = 4096;
4342 info->close_delay = 5*HZ/10; 4343 info->close_delay = 5*HZ/10;
4343 info->closing_wait = 30*HZ; 4344 info->closing_wait = 30*HZ;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index d4334c79f8d4..07f34d43dc7f 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -485,7 +485,7 @@ static void enable_loopback(struct slgt_info *info);
485static void set_rate(struct slgt_info *info, u32 data_rate); 485static void set_rate(struct slgt_info *info, u32 data_rate);
486 486
487static int bh_action(struct slgt_info *info); 487static int bh_action(struct slgt_info *info);
488static void bh_handler(void* context); 488static void bh_handler(struct work_struct *work);
489static void bh_transmit(struct slgt_info *info); 489static void bh_transmit(struct slgt_info *info);
490static void isr_serial(struct slgt_info *info); 490static void isr_serial(struct slgt_info *info);
491static void isr_rdma(struct slgt_info *info); 491static void isr_rdma(struct slgt_info *info);
@@ -1878,9 +1878,9 @@ static int bh_action(struct slgt_info *info)
1878/* 1878/*
1879 * perform bottom half processing 1879 * perform bottom half processing
1880 */ 1880 */
1881static void bh_handler(void* context) 1881static void bh_handler(struct work_struct *work)
1882{ 1882{
1883 struct slgt_info *info = context; 1883 struct slgt_info *info = container_of(work, struct slgt_info, task);
1884 int action; 1884 int action;
1885 1885
1886 if (!info) 1886 if (!info)
@@ -3326,7 +3326,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
3326 } else { 3326 } else {
3327 memset(info, 0, sizeof(struct slgt_info)); 3327 memset(info, 0, sizeof(struct slgt_info));
3328 info->magic = MGSL_MAGIC; 3328 info->magic = MGSL_MAGIC;
3329 INIT_WORK(&info->task, bh_handler, info); 3329 INIT_WORK(&info->task, bh_handler);
3330 info->max_frame_size = 4096; 3330 info->max_frame_size = 4096;
3331 info->raw_rx_size = DMABUFSIZE; 3331 info->raw_rx_size = DMABUFSIZE;
3332 info->close_delay = 5*HZ/10; 3332 info->close_delay = 5*HZ/10;
@@ -4799,6 +4799,6 @@ static void rx_timeout(unsigned long context)
4799 spin_lock_irqsave(&info->lock, flags); 4799 spin_lock_irqsave(&info->lock, flags);
4800 info->pending_bh |= BH_RECEIVE; 4800 info->pending_bh |= BH_RECEIVE;
4801 spin_unlock_irqrestore(&info->lock, flags); 4801 spin_unlock_irqrestore(&info->lock, flags);
4802 bh_handler(info); 4802 bh_handler(&info->task);
4803} 4803}
4804 4804
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 3e932b681371..13a57245cf2e 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -602,7 +602,7 @@ static void enable_loopback(SLMP_INFO *info, int enable);
602static void set_rate(SLMP_INFO *info, u32 data_rate); 602static void set_rate(SLMP_INFO *info, u32 data_rate);
603 603
604static int bh_action(SLMP_INFO *info); 604static int bh_action(SLMP_INFO *info);
605static void bh_handler(void* Context); 605static void bh_handler(struct work_struct *work);
606static void bh_receive(SLMP_INFO *info); 606static void bh_receive(SLMP_INFO *info);
607static void bh_transmit(SLMP_INFO *info); 607static void bh_transmit(SLMP_INFO *info);
608static void bh_status(SLMP_INFO *info); 608static void bh_status(SLMP_INFO *info);
@@ -2063,9 +2063,9 @@ int bh_action(SLMP_INFO *info)
2063 2063
2064/* Perform bottom half processing of work items queued by ISR. 2064/* Perform bottom half processing of work items queued by ISR.
2065 */ 2065 */
2066void bh_handler(void* Context) 2066void bh_handler(struct work_struct *work)
2067{ 2067{
2068 SLMP_INFO *info = (SLMP_INFO*)Context; 2068 SLMP_INFO *info = container_of(work, SLMP_INFO, task);
2069 int action; 2069 int action;
2070 2070
2071 if (!info) 2071 if (!info)
@@ -3805,7 +3805,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
3805 } else { 3805 } else {
3806 memset(info, 0, sizeof(SLMP_INFO)); 3806 memset(info, 0, sizeof(SLMP_INFO));
3807 info->magic = MGSL_MAGIC; 3807 info->magic = MGSL_MAGIC;
3808 INIT_WORK(&info->task, bh_handler, info); 3808 INIT_WORK(&info->task, bh_handler);
3809 info->max_frame_size = 4096; 3809 info->max_frame_size = 4096;
3810 info->close_delay = 5*HZ/10; 3810 info->close_delay = 5*HZ/10;
3811 info->closing_wait = 30*HZ; 3811 info->closing_wait = 30*HZ;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5f49280779fb..c64f5bcff947 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -219,13 +219,13 @@ static struct sysrq_key_op sysrq_term_op = {
219 .enable_mask = SYSRQ_ENABLE_SIGNAL, 219 .enable_mask = SYSRQ_ENABLE_SIGNAL,
220}; 220};
221 221
222static void moom_callback(void *ignored) 222static void moom_callback(struct work_struct *ignored)
223{ 223{
224 out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], 224 out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
225 GFP_KERNEL, 0); 225 GFP_KERNEL, 0);
226} 226}
227 227
228static DECLARE_WORK(moom_work, moom_callback, NULL); 228static DECLARE_WORK(moom_work, moom_callback);
229 229
230static void sysrq_handle_moom(int key, struct tty_struct *tty) 230static void sysrq_handle_moom(int key, struct tty_struct *tty)
231{ 231{
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6e1329d404d2..774fa861169a 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -325,9 +325,9 @@ static void user_reader_timeout(unsigned long ptr)
325 schedule_work(&chip->work); 325 schedule_work(&chip->work);
326} 326}
327 327
328static void timeout_work(void *ptr) 328static void timeout_work(struct work_struct *work)
329{ 329{
330 struct tpm_chip *chip = ptr; 330 struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
331 331
332 down(&chip->buffer_mutex); 332 down(&chip->buffer_mutex);
333 atomic_set(&chip->data_pending, 0); 333 atomic_set(&chip->data_pending, 0);
@@ -1105,7 +1105,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
1105 init_MUTEX(&chip->tpm_mutex); 1105 init_MUTEX(&chip->tpm_mutex);
1106 INIT_LIST_HEAD(&chip->list); 1106 INIT_LIST_HEAD(&chip->list);
1107 1107
1108 INIT_WORK(&chip->work, timeout_work, chip); 1108 INIT_WORK(&chip->work, timeout_work);
1109 1109
1110 init_timer(&chip->user_read_timer); 1110 init_timer(&chip->user_read_timer);
1111 chip->user_read_timer.function = user_reader_timeout; 1111 chip->user_read_timer.function = user_reader_timeout;
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 50dc49205a23..b3cfc8bc613c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
1254 1254
1255/** 1255/**
1256 * do_tty_hangup - actual handler for hangup events 1256 * do_tty_hangup - actual handler for hangup events
1257 * @data: tty device 1257 * @work: tty device
1258 * 1258 *
1259 * This can be called by the "eventd" kernel thread. That is process 1259 * This can be called by the "eventd" kernel thread. That is process
1260 * synchronous but doesn't hold any locks, so we need to make sure we 1260 * synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
1274 * tasklist_lock to walk task list for hangup event 1274 * tasklist_lock to walk task list for hangup event
1275 * 1275 *
1276 */ 1276 */
1277static void do_tty_hangup(void *data) 1277static void do_tty_hangup(struct work_struct *work)
1278{ 1278{
1279 struct tty_struct *tty = (struct tty_struct *) data; 1279 struct tty_struct *tty =
1280 container_of(work, struct tty_struct, hangup_work);
1280 struct file * cons_filp = NULL; 1281 struct file * cons_filp = NULL;
1281 struct file *filp, *f = NULL; 1282 struct file *filp, *f = NULL;
1282 struct task_struct *p; 1283 struct task_struct *p;
@@ -1433,7 +1434,7 @@ void tty_vhangup(struct tty_struct * tty)
1433 1434
1434 printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf)); 1435 printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
1435#endif 1436#endif
1436 do_tty_hangup((void *) tty); 1437 do_tty_hangup(&tty->hangup_work);
1437} 1438}
1438EXPORT_SYMBOL(tty_vhangup); 1439EXPORT_SYMBOL(tty_vhangup);
1439 1440
@@ -3304,12 +3305,13 @@ int tty_ioctl(struct inode * inode, struct file * file,
3304 * Nasty bug: do_SAK is being called in interrupt context. This can 3305 * Nasty bug: do_SAK is being called in interrupt context. This can
3305 * deadlock. We punt it up to process context. AKPM - 16Mar2001 3306 * deadlock. We punt it up to process context. AKPM - 16Mar2001
3306 */ 3307 */
3307static void __do_SAK(void *arg) 3308static void __do_SAK(struct work_struct *work)
3308{ 3309{
3310 struct tty_struct *tty =
3311 container_of(work, struct tty_struct, SAK_work);
3309#ifdef TTY_SOFT_SAK 3312#ifdef TTY_SOFT_SAK
3310 tty_hangup(tty); 3313 tty_hangup(tty);
3311#else 3314#else
3312 struct tty_struct *tty = arg;
3313 struct task_struct *g, *p; 3315 struct task_struct *g, *p;
3314 int session; 3316 int session;
3315 int i; 3317 int i;
@@ -3388,7 +3390,7 @@ void do_SAK(struct tty_struct *tty)
3388{ 3390{
3389 if (!tty) 3391 if (!tty)
3390 return; 3392 return;
3391 PREPARE_WORK(&tty->SAK_work, __do_SAK, tty); 3393 PREPARE_WORK(&tty->SAK_work, __do_SAK);
3392 schedule_work(&tty->SAK_work); 3394 schedule_work(&tty->SAK_work);
3393} 3395}
3394 3396
@@ -3396,7 +3398,7 @@ EXPORT_SYMBOL(do_SAK);
3396 3398
3397/** 3399/**
3398 * flush_to_ldisc 3400 * flush_to_ldisc
3399 * @private_: tty structure passed from work queue. 3401 * @work: tty structure passed from work queue.
3400 * 3402 *
3401 * This routine is called out of the software interrupt to flush data 3403 * This routine is called out of the software interrupt to flush data
3402 * from the buffer chain to the line discipline. 3404 * from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@ EXPORT_SYMBOL(do_SAK);
3406 * receive_buf method is single threaded for each tty instance. 3408 * receive_buf method is single threaded for each tty instance.
3407 */ 3409 */
3408 3410
3409static void flush_to_ldisc(void *private_) 3411static void flush_to_ldisc(struct work_struct *work)
3410{ 3412{
3411 struct tty_struct *tty = (struct tty_struct *) private_; 3413 struct tty_struct *tty =
3414 container_of(work, struct tty_struct, buf.work.work);
3412 unsigned long flags; 3415 unsigned long flags;
3413 struct tty_ldisc *disc; 3416 struct tty_ldisc *disc;
3414 struct tty_buffer *tbuf, *head; 3417 struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@ void tty_flip_buffer_push(struct tty_struct *tty)
3553 spin_unlock_irqrestore(&tty->buf.lock, flags); 3556 spin_unlock_irqrestore(&tty->buf.lock, flags);
3554 3557
3555 if (tty->low_latency) 3558 if (tty->low_latency)
3556 flush_to_ldisc((void *) tty); 3559 flush_to_ldisc(&tty->buf.work.work);
3557 else 3560 else
3558 schedule_delayed_work(&tty->buf.work, 1); 3561 schedule_delayed_work(&tty->buf.work, 1);
3559} 3562}
@@ -3580,17 +3583,17 @@ static void initialize_tty_struct(struct tty_struct *tty)
3580 tty->overrun_time = jiffies; 3583 tty->overrun_time = jiffies;
3581 tty->buf.head = tty->buf.tail = NULL; 3584 tty->buf.head = tty->buf.tail = NULL;
3582 tty_buffer_init(tty); 3585 tty_buffer_init(tty);
3583 INIT_WORK(&tty->buf.work, flush_to_ldisc, tty); 3586 INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
3584 init_MUTEX(&tty->buf.pty_sem); 3587 init_MUTEX(&tty->buf.pty_sem);
3585 mutex_init(&tty->termios_mutex); 3588 mutex_init(&tty->termios_mutex);
3586 init_waitqueue_head(&tty->write_wait); 3589 init_waitqueue_head(&tty->write_wait);
3587 init_waitqueue_head(&tty->read_wait); 3590 init_waitqueue_head(&tty->read_wait);
3588 INIT_WORK(&tty->hangup_work, do_tty_hangup, tty); 3591 INIT_WORK(&tty->hangup_work, do_tty_hangup);
3589 mutex_init(&tty->atomic_read_lock); 3592 mutex_init(&tty->atomic_read_lock);
3590 mutex_init(&tty->atomic_write_lock); 3593 mutex_init(&tty->atomic_write_lock);
3591 spin_lock_init(&tty->read_lock); 3594 spin_lock_init(&tty->read_lock);
3592 INIT_LIST_HEAD(&tty->tty_files); 3595 INIT_LIST_HEAD(&tty->tty_files);
3593 INIT_WORK(&tty->SAK_work, NULL, NULL); 3596 INIT_WORK(&tty->SAK_work, NULL);
3594} 3597}
3595 3598
3596/* 3599/*
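tty_io also illustrates a work item whose handler is bound late: SAK_work is initialized with a NULL function and re-pointed with PREPARE_WORK() just before being scheduled, and PREPARE_WORK() likewise takes no data argument anymore. Sketch with invented names:

#include <linux/workqueue.h>

struct foo_tty {
        struct work_struct sak_work;
};

static void foo_do_sak(struct work_struct *work)
{
        struct foo_tty *tty = container_of(work, struct foo_tty, sak_work);

        /* ... kill sessions attached to this tty ... */
        (void)tty;
}

static void foo_tty_init(struct foo_tty *tty)
{
        INIT_WORK(&tty->sak_work, NULL);                /* handler chosen later */
}

static void foo_trigger_sak(struct foo_tty *tty)
{
        PREPARE_WORK(&tty->sak_work, foo_do_sak);       /* was: (..., foo_do_sak, tty) */
        schedule_work(&tty->sak_work);
}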
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 87587b4385ab..75ff0286e1ad 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -155,7 +155,7 @@ static void con_flush_chars(struct tty_struct *tty);
155static void set_vesa_blanking(char __user *p); 155static void set_vesa_blanking(char __user *p);
156static void set_cursor(struct vc_data *vc); 156static void set_cursor(struct vc_data *vc);
157static void hide_cursor(struct vc_data *vc); 157static void hide_cursor(struct vc_data *vc);
158static void console_callback(void *ignored); 158static void console_callback(struct work_struct *ignored);
159static void blank_screen_t(unsigned long dummy); 159static void blank_screen_t(unsigned long dummy);
160static void set_palette(struct vc_data *vc); 160static void set_palette(struct vc_data *vc);
161 161
@@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
174static int blankinterval = 10*60*HZ; 174static int blankinterval = 10*60*HZ;
175static int vesa_off_interval; 175static int vesa_off_interval;
176 176
177static DECLARE_WORK(console_work, console_callback, NULL); 177static DECLARE_WORK(console_work, console_callback);
178 178
179/* 179/*
180 * fg_console is the current virtual console, 180 * fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@ out:
2154 * with other console code and prevention of re-entrancy is 2154 * with other console code and prevention of re-entrancy is
2155 * ensured with console_sem. 2155 * ensured with console_sem.
2156 */ 2156 */
2157static void console_callback(void *ignored) 2157static void console_callback(struct work_struct *ignored)
2158{ 2158{
2159 acquire_console_sem(); 2159 acquire_console_sem();
2160 2160
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 05f8ce2cfb4a..b418b16e910e 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,9 +31,11 @@
31#include <linux/connector.h> 31#include <linux/connector.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33 33
34void cn_queue_wrapper(void *data) 34void cn_queue_wrapper(struct work_struct *work)
35{ 35{
36 struct cn_callback_data *d = data; 36 struct cn_callback_entry *cbq =
37 container_of(work, struct cn_callback_entry, work.work);
38 struct cn_callback_data *d = &cbq->data;
37 39
38 d->callback(d->callback_priv); 40 d->callback(d->callback_priv);
39 41
@@ -57,7 +59,7 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc
57 memcpy(&cbq->id.id, id, sizeof(struct cb_id)); 59 memcpy(&cbq->id.id, id, sizeof(struct cb_id));
58 cbq->data.callback = callback; 60 cbq->data.callback = callback;
59 61
60 INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data); 62 INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
61 return cbq; 63 return cbq;
62} 64}
63 65
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index b49bacfd8de8..5e7cd45d10ee 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -135,40 +135,39 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
135 spin_lock_bh(&dev->cbdev->queue_lock); 135 spin_lock_bh(&dev->cbdev->queue_lock);
136 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { 136 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
137 if (cn_cb_equal(&__cbq->id.id, &msg->id)) { 137 if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
138 if (likely(!test_bit(0, &__cbq->work.pending) && 138 if (likely(!test_bit(WORK_STRUCT_PENDING,
139 &__cbq->work.work.management) &&
139 __cbq->data.ddata == NULL)) { 140 __cbq->data.ddata == NULL)) {
140 __cbq->data.callback_priv = msg; 141 __cbq->data.callback_priv = msg;
141 142
142 __cbq->data.ddata = data; 143 __cbq->data.ddata = data;
143 __cbq->data.destruct_data = destruct_data; 144 __cbq->data.destruct_data = destruct_data;
144 145
145 if (queue_work(dev->cbdev->cn_queue, 146 if (queue_delayed_work(
146 &__cbq->work)) 147 dev->cbdev->cn_queue,
148 &__cbq->work, 0))
147 err = 0; 149 err = 0;
148 } else { 150 } else {
149 struct work_struct *w;
150 struct cn_callback_data *d; 151 struct cn_callback_data *d;
151 152
152 w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC); 153 __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
153 if (w) { 154 if (__cbq) {
154 d = (struct cn_callback_data *)(w+1); 155 d = &__cbq->data;
155
156 d->callback_priv = msg; 156 d->callback_priv = msg;
157 d->callback = __cbq->data.callback; 157 d->callback = __cbq->data.callback;
158 d->ddata = data; 158 d->ddata = data;
159 d->destruct_data = destruct_data; 159 d->destruct_data = destruct_data;
160 d->free = w; 160 d->free = __cbq;
161 161
162 INIT_LIST_HEAD(&w->entry); 162 INIT_DELAYED_WORK(&__cbq->work,
163 w->pending = 0; 163 &cn_queue_wrapper);
164 w->func = &cn_queue_wrapper;
165 w->data = d;
166 init_timer(&w->timer);
167 164
168 if (queue_work(dev->cbdev->cn_queue, w)) 165 if (queue_delayed_work(
166 dev->cbdev->cn_queue,
167 &__cbq->work, 0))
169 err = 0; 168 err = 0;
170 else { 169 else {
171 kfree(w); 170 kfree(__cbq);
172 err = -EINVAL; 171 err = -EINVAL;
173 } 172 }
174 } else 173 } else
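The connector change above also affects allocation: the old code allocated a bare work_struct with a cn_callback_data blob appended, but since the handler now derives everything from the work pointer, the natural unit to allocate is the containing entry, and queue_work() on the now-delayed item becomes queue_delayed_work() with a zero delay. Reduced sketch, struct and function names invented rather than the connector's own:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct foo_entry {
        struct delayed_work work;
        void *payload;
};

static void foo_wrapper(struct work_struct *work)
{
        struct foo_entry *e = container_of(work, struct foo_entry, work.work);

        /* ... deliver e->payload to the callback ... */
        kfree(e);
}

static int foo_post(struct workqueue_struct *wq, void *payload)
{
        struct foo_entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

        if (!e)
                return -ENOMEM;
        e->payload = payload;
        INIT_DELAYED_WORK(&e->work, foo_wrapper);
        if (!queue_delayed_work(wq, &e->work, 0)) {     /* 0 = run as soon as possible */
                kfree(e);
                return -EINVAL;
        }
        return 0;
}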
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index dd0c2623e27b..7a7c6e6dfe4f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
42 42
43/* internal prototypes */ 43/* internal prototypes */
44static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); 44static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
45static void handle_update(void *data); 45static void handle_update(struct work_struct *work);
46 46
47/** 47/**
48 * Two notifier lists: the "policy" list is involved in the 48 * Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
665 mutex_init(&policy->lock); 665 mutex_init(&policy->lock);
666 mutex_lock(&policy->lock); 666 mutex_lock(&policy->lock);
667 init_completion(&policy->kobj_unregister); 667 init_completion(&policy->kobj_unregister);
668 INIT_WORK(&policy->update, handle_update, (void *)(long)cpu); 668 INIT_WORK(&policy->update, handle_update);
669 669
670 /* call driver. From then on the cpufreq must be able 670 /* call driver. From then on the cpufreq must be able
671 * to accept all calls to ->verify and ->setpolicy for this CPU 671 * to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
895} 895}
896 896
897 897
898static void handle_update(void *data) 898static void handle_update(struct work_struct *work)
899{ 899{
900 unsigned int cpu = (unsigned int)(long)data; 900 struct cpufreq_policy *policy =
901 container_of(work, struct cpufreq_policy, update);
902 unsigned int cpu = policy->cpu;
901 dprintk("handle_update for cpu %u called\n", cpu); 903 dprintk("handle_update for cpu %u called\n", cpu);
902 cpufreq_update_policy(cpu); 904 cpufreq_update_policy(cpu);
903} 905}
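handle_update() used to receive the CPU number cast into the void pointer; with the new prototype any such scalar has to be reachable from the structure that embeds the work item, here policy->cpu. Short sketch on an invented struct foo_policy:

#include <linux/workqueue.h>

struct foo_policy {
        unsigned int cpu;
        struct work_struct update;
};

static void foo_handle_update(struct work_struct *work)
{
        struct foo_policy *policy =
                container_of(work, struct foo_policy, update);
        unsigned int cpu = policy->cpu;         /* old code: (unsigned int)(long)data */

        /* ... revalidate the policy for this cpu ... */
        (void)cpu;
}

static void foo_policy_init(struct foo_policy *policy, unsigned int cpu)
{
        policy->cpu = cpu;
        INIT_WORK(&policy->update, foo_handle_update);
}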
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c4c578defabf..5ef5ede5b884 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -59,7 +59,7 @@ static unsigned int def_sampling_rate;
59#define MAX_SAMPLING_DOWN_FACTOR (10) 59#define MAX_SAMPLING_DOWN_FACTOR (10)
60#define TRANSITION_LATENCY_LIMIT (10 * 1000) 60#define TRANSITION_LATENCY_LIMIT (10 * 1000)
61 61
62static void do_dbs_timer(void *data); 62static void do_dbs_timer(struct work_struct *work);
63 63
64struct cpu_dbs_info_s { 64struct cpu_dbs_info_s {
65 struct cpufreq_policy *cur_policy; 65 struct cpufreq_policy *cur_policy;
@@ -82,7 +82,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
82 * is recursive for the same process. -Venki 82 * is recursive for the same process. -Venki
83 */ 83 */
84static DEFINE_MUTEX (dbs_mutex); 84static DEFINE_MUTEX (dbs_mutex);
85static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); 85static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
86 86
87struct dbs_tuners { 87struct dbs_tuners {
88 unsigned int sampling_rate; 88 unsigned int sampling_rate;
@@ -420,7 +420,7 @@ static void dbs_check_cpu(int cpu)
420 } 420 }
421} 421}
422 422
423static void do_dbs_timer(void *data) 423static void do_dbs_timer(struct work_struct *work)
424{ 424{
425 int i; 425 int i;
426 lock_cpu_hotplug(); 426 lock_cpu_hotplug();
@@ -435,7 +435,6 @@ static void do_dbs_timer(void *data)
435 435
436static inline void dbs_timer_init(void) 436static inline void dbs_timer_init(void)
437{ 437{
438 INIT_WORK(&dbs_work, do_dbs_timer, NULL);
439 schedule_delayed_work(&dbs_work, 438 schedule_delayed_work(&dbs_work,
440 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 439 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
441 return; 440 return;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bf8aa45d4f01..e1cc5113c2ae 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,13 +47,17 @@ static unsigned int def_sampling_rate;
47#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 47#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
48#define TRANSITION_LATENCY_LIMIT (10 * 1000) 48#define TRANSITION_LATENCY_LIMIT (10 * 1000)
49 49
50static void do_dbs_timer(void *data); 50static void do_dbs_timer(struct work_struct *work);
51
52/* Sampling types */
53enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
51 54
52struct cpu_dbs_info_s { 55struct cpu_dbs_info_s {
53 cputime64_t prev_cpu_idle; 56 cputime64_t prev_cpu_idle;
54 cputime64_t prev_cpu_wall; 57 cputime64_t prev_cpu_wall;
55 struct cpufreq_policy *cur_policy; 58 struct cpufreq_policy *cur_policy;
56 struct work_struct work; 59 struct delayed_work work;
60 enum dbs_sample sample_type;
57 unsigned int enable; 61 unsigned int enable;
58 struct cpufreq_frequency_table *freq_table; 62 struct cpufreq_frequency_table *freq_table;
59 unsigned int freq_lo; 63 unsigned int freq_lo;
@@ -407,30 +411,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
407 } 411 }
408} 412}
409 413
410/* Sampling types */ 414static void do_dbs_timer(struct work_struct *work)
411enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
412
413static void do_dbs_timer(void *data)
414{ 415{
415 unsigned int cpu = smp_processor_id(); 416 unsigned int cpu = smp_processor_id();
416 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 417 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
418 enum dbs_sample sample_type = dbs_info->sample_type;
417 /* We want all CPUs to do sampling nearly on same jiffy */ 419 /* We want all CPUs to do sampling nearly on same jiffy */
418 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 420 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
421
422 /* Permit rescheduling of this work item */
423 work_release(work);
424
419 delay -= jiffies % delay; 425 delay -= jiffies % delay;
420 426
421 if (!dbs_info->enable) 427 if (!dbs_info->enable)
422 return; 428 return;
423 /* Common NORMAL_SAMPLE setup */ 429 /* Common NORMAL_SAMPLE setup */
424 INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE); 430 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
425 if (!dbs_tuners_ins.powersave_bias || 431 if (!dbs_tuners_ins.powersave_bias ||
426 (unsigned long) data == DBS_NORMAL_SAMPLE) { 432 sample_type == DBS_NORMAL_SAMPLE) {
427 lock_cpu_hotplug(); 433 lock_cpu_hotplug();
428 dbs_check_cpu(dbs_info); 434 dbs_check_cpu(dbs_info);
429 unlock_cpu_hotplug(); 435 unlock_cpu_hotplug();
430 if (dbs_info->freq_lo) { 436 if (dbs_info->freq_lo) {
431 /* Setup timer for SUB_SAMPLE */ 437 /* Setup timer for SUB_SAMPLE */
432 INIT_WORK(&dbs_info->work, do_dbs_timer, 438 dbs_info->sample_type = DBS_SUB_SAMPLE;
433 (void *)DBS_SUB_SAMPLE);
434 delay = dbs_info->freq_hi_jiffies; 439 delay = dbs_info->freq_hi_jiffies;
435 } 440 }
436 } else { 441 } else {
@@ -449,7 +454,8 @@ static inline void dbs_timer_init(unsigned int cpu)
449 delay -= jiffies % delay; 454 delay -= jiffies % delay;
450 455
451 ondemand_powersave_bias_init(); 456 ondemand_powersave_bias_init();
452 INIT_WORK(&dbs_info->work, do_dbs_timer, NULL); 457 INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
458 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
453 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); 459 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
454} 460}
455 461
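The ondemand governor had been re-running INIT_WORK() on every pass purely to pass a sample type through the data pointer; after the conversion that flag lives in the per-CPU structure and the same delayed_work is simply rescheduled. A compressed sketch with placeholder names (the transitional _NAR/work_release() details are left out):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

enum foo_sample { FOO_NORMAL_SAMPLE, FOO_SUB_SAMPLE };

struct foo_dbs_info {
        struct delayed_work work;
        enum foo_sample sample_type;            /* replaces the old data pointer */
        int enable;
};

static void foo_dbs_timer(struct work_struct *work)
{
        struct foo_dbs_info *info =
                container_of(work, struct foo_dbs_info, work.work);
        enum foo_sample this_sample = info->sample_type;
        unsigned long delay = HZ / 10;

        if (!info->enable)
                return;

        /* default the next pass; override below if a sub-sample is wanted */
        info->sample_type = FOO_NORMAL_SAMPLE;
        if (this_sample == FOO_NORMAL_SAMPLE) {
                /* ... evaluate load; request a follow-up sample if needed ... */
                info->sample_type = FOO_SUB_SAMPLE;
        }
        schedule_delayed_work(&info->work, delay);
}

static void foo_dbs_init(struct foo_dbs_info *info)
{
        info->enable = 1;
        info->sample_type = FOO_NORMAL_SAMPLE;
        INIT_DELAYED_WORK(&info->work, foo_dbs_timer);
        schedule_delayed_work(&info->work, HZ / 10);
}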
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
index 4630f1969a09..15edf40828b4 100644
--- a/drivers/i2c/chips/ds1374.c
+++ b/drivers/i2c/chips/ds1374.c
@@ -140,12 +140,14 @@ ulong ds1374_get_rtc_time(void)
140 return t1; 140 return t1;
141} 141}
142 142
143static void ds1374_set_work(void *arg) 143static ulong new_time;
144
145static void ds1374_set_work(struct work_struct *work)
144{ 146{
145 ulong t1, t2; 147 ulong t1, t2;
146 int limit = 10; /* arbitrary retry limit */ 148 int limit = 10; /* arbitrary retry limit */
147 149
148 t1 = *(ulong *) arg; 150 t1 = new_time;
149 151
150 mutex_lock(&ds1374_mutex); 152 mutex_lock(&ds1374_mutex);
151 153
@@ -167,11 +169,9 @@ static void ds1374_set_work(void *arg)
167 "can't confirm time set from rtc chip\n"); 169 "can't confirm time set from rtc chip\n");
168} 170}
169 171
170static ulong new_time;
171
172static struct workqueue_struct *ds1374_workqueue; 172static struct workqueue_struct *ds1374_workqueue;
173 173
174static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time); 174static DECLARE_WORK(ds1374_work, ds1374_set_work);
175 175
176int ds1374_set_rtc_time(ulong nowtime) 176int ds1374_set_rtc_time(ulong nowtime)
177{ 177{
@@ -180,7 +180,7 @@ int ds1374_set_rtc_time(ulong nowtime)
180 if (in_interrupt()) 180 if (in_interrupt())
181 queue_work(ds1374_workqueue, &ds1374_work); 181 queue_work(ds1374_workqueue, &ds1374_work);
182 else 182 else
183 ds1374_set_work(&new_time); 183 ds1374_set_work(NULL);
184 184
185 return 0; 185 return 0;
186} 186}
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index d90a3a1898c0..8f4378a1631c 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -31,9 +31,10 @@
31#include "config_roms.h" 31#include "config_roms.h"
32 32
33 33
34static void delayed_reset_bus(void * __reset_info) 34static void delayed_reset_bus(struct work_struct *work)
35{ 35{
36 struct hpsb_host *host = (struct hpsb_host*)__reset_info; 36 struct hpsb_host *host =
37 container_of(work, struct hpsb_host, delayed_reset.work);
37 int generation = host->csr.generation + 1; 38 int generation = host->csr.generation + 1;
38 39
39 /* The generation field rolls over to 2 rather than 0 per IEEE 40 /* The generation field rolls over to 2 rather than 0 per IEEE
@@ -145,7 +146,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
145 146
146 atomic_set(&h->generation, 0); 147 atomic_set(&h->generation, 0);
147 148
148 INIT_WORK(&h->delayed_reset, delayed_reset_bus, h); 149 INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
149 150
150 init_timer(&h->timeout); 151 init_timer(&h->timeout);
151 h->timeout.data = (unsigned long) h; 152 h->timeout.data = (unsigned long) h;
@@ -234,7 +235,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
234 * Config ROM in the near future. */ 235 * Config ROM in the near future. */
235 reset_delay = HZ; 236 reset_delay = HZ;
236 237
237 PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host); 238 PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
238 schedule_delayed_work(&host->delayed_reset, reset_delay); 239 schedule_delayed_work(&host->delayed_reset, reset_delay);
239 240
240 return 0; 241 return 0;
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index bc6dbfadb891..d553e38c9543 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -62,7 +62,7 @@ struct hpsb_host {
62 struct class_device class_dev; 62 struct class_device class_dev;
63 63
64 int update_config_rom; 64 int update_config_rom;
65 struct work_struct delayed_reset; 65 struct delayed_work delayed_reset;
66 unsigned int config_roms; 66 unsigned int config_roms;
67 67
68 struct list_head addr_space; 68 struct list_head addr_space;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 6986ac188281..cd156d4e779e 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -493,20 +493,25 @@ static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id,
493 scsi_unblock_requests(scsi_id->scsi_host); 493 scsi_unblock_requests(scsi_id->scsi_host);
494} 494}
495 495
496static void sbp2util_write_orb_pointer(void *p) 496static void sbp2util_write_orb_pointer(struct work_struct *work)
497{ 497{
498 struct scsi_id_instance_data *scsi_id =
499 container_of(work, struct scsi_id_instance_data,
500 protocol_work.work);
498 quadlet_t data[2]; 501 quadlet_t data[2];
499 502
500 data[0] = ORB_SET_NODE_ID( 503 data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
501 ((struct scsi_id_instance_data *)p)->hi->host->node_id); 504 data[1] = scsi_id->last_orb_dma;
502 data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
503 sbp2util_cpu_to_be32_buffer(data, 8); 505 sbp2util_cpu_to_be32_buffer(data, 8);
504 sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8); 506 sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
505} 507}
506 508
507static void sbp2util_write_doorbell(void *p) 509static void sbp2util_write_doorbell(struct work_struct *work)
508{ 510{
509 sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4); 511 struct scsi_id_instance_data *scsi_id =
512 container_of(work, struct scsi_id_instance_data,
513 protocol_work.work);
514 sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
510} 515}
511 516
512/* 517/*
@@ -843,7 +848,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
843 INIT_LIST_HEAD(&scsi_id->scsi_list); 848 INIT_LIST_HEAD(&scsi_id->scsi_list);
844 spin_lock_init(&scsi_id->sbp2_command_orb_lock); 849 spin_lock_init(&scsi_id->sbp2_command_orb_lock);
845 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); 850 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
846 INIT_WORK(&scsi_id->protocol_work, NULL, NULL); 851 INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
847 852
848 ud->device.driver_data = scsi_id; 853 ud->device.driver_data = scsi_id;
849 854
@@ -2047,11 +2052,10 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2047 * We do not accept new commands until the job is over. 2052 * We do not accept new commands until the job is over.
2048 */ 2053 */
2049 scsi_block_requests(scsi_id->scsi_host); 2054 scsi_block_requests(scsi_id->scsi_host);
2050 PREPARE_WORK(&scsi_id->protocol_work, 2055 PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
2051 last_orb ? sbp2util_write_doorbell: 2056 last_orb ? sbp2util_write_doorbell:
2052 sbp2util_write_orb_pointer, 2057 sbp2util_write_orb_pointer);
2053 scsi_id); 2058 schedule_delayed_work(&scsi_id->protocol_work, 0);
2054 schedule_work(&scsi_id->protocol_work);
2055 } 2059 }
2056} 2060}
2057 2061
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index abbe48e646c3..1b16d6b9cf11 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -348,7 +348,7 @@ struct scsi_id_instance_data {
348 unsigned workarounds; 348 unsigned workarounds;
349 349
350 atomic_t state; 350 atomic_t state;
351 struct work_struct protocol_work; 351 struct delayed_work protocol_work;
352}; 352};
353 353
354/* For use in scsi_id_instance_data.state */ 354/* For use in scsi_id_instance_data.state */
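
sbp2 also switches its handler at run time. With the delayed_work API this patch targets, PREPARE_DELAYED_WORK() re-points the item and schedule_delayed_work(..., 0) stands in for the old schedule_work() call. A sketch under those assumptions, with invented names:

#include <linux/workqueue.h>

/* Hypothetical structure, for illustration only. */
struct my_lu {
        struct delayed_work protocol_work;
        unsigned int orb_writes, doorbells;
};

static void my_write_orb_pointer(struct work_struct *work)
{
        struct my_lu *lu =
                container_of(work, struct my_lu, protocol_work.work);

        lu->orb_writes++;       /* stand-in for the fetch-agent write */
}

static void my_write_doorbell(struct work_struct *work)
{
        struct my_lu *lu =
                container_of(work, struct my_lu, protocol_work.work);

        lu->doorbells++;        /* stand-in for ringing the doorbell */
}

static void my_kick_fetch_agent(struct my_lu *lu, int last_orb)
{
        /* Re-point the delayed work at the handler that is needed now,
         * then queue it with zero delay. */
        PREPARE_DELAYED_WORK(&lu->protocol_work,
                             last_orb ? my_write_doorbell
                                      : my_write_orb_pointer);
        schedule_delayed_work(&lu->protocol_work, 0);
}
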
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 7767a11b6890..af939796750d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -55,11 +55,11 @@ struct addr_req {
55 int status; 55 int status;
56}; 56};
57 57
58static void process_req(void *data); 58static void process_req(struct work_struct *work);
59 59
60static DEFINE_MUTEX(lock); 60static DEFINE_MUTEX(lock);
61static LIST_HEAD(req_list); 61static LIST_HEAD(req_list);
62static DECLARE_WORK(work, process_req, NULL); 62static DECLARE_DELAYED_WORK(work, process_req);
63static struct workqueue_struct *addr_wq; 63static struct workqueue_struct *addr_wq;
64 64
65void rdma_addr_register_client(struct rdma_addr_client *client) 65void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -215,7 +215,7 @@ out:
215 return ret; 215 return ret;
216} 216}
217 217
218static void process_req(void *data) 218static void process_req(struct work_struct *work)
219{ 219{
220 struct addr_req *req, *temp_req; 220 struct addr_req *req, *temp_req;
221 struct sockaddr_in *src_in, *dst_in; 221 struct sockaddr_in *src_in, *dst_in;
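
addr.c covers the statically declared case: DECLARE_DELAYED_WORK() now takes only the handler, so code that used the old data pointer falls back on file-scope state. An illustrative sketch; my_wq, my_req_work and my_process_req are hypothetical:

#include <linux/errno.h>
#include <linux/workqueue.h>

static void my_process_req(struct work_struct *work);

/* Hypothetical file-scope state standing in for the request list. */
static DECLARE_DELAYED_WORK(my_req_work, my_process_req);
static struct workqueue_struct *my_wq;
static unsigned int my_pending;

static void my_process_req(struct work_struct *work)
{
        my_pending = 0;         /* stand-in for draining the request list */
}

static int my_resolver_init(void)
{
        my_wq = create_singlethread_workqueue("my_wq");
        if (!my_wq)
                return -ENOMEM;

        /* Zero delay behaves like the old schedule_work() on this item. */
        queue_delayed_work(my_wq, &my_req_work, 0);
        return 0;
}
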
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64e67a6..98272fbbfb31 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@ err:
285 kfree(tprops); 285 kfree(tprops);
286} 286}
287 287
288static void ib_cache_task(void *work_ptr) 288static void ib_cache_task(struct work_struct *_work)
289{ 289{
290 struct ib_update_work *work = work_ptr; 290 struct ib_update_work *work =
291 container_of(_work, struct ib_update_work, work);
291 292
292 ib_cache_update(work->device, work->port_num); 293 ib_cache_update(work->device, work->port_num);
293 kfree(work); 294 kfree(work);
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
306 event->event == IB_EVENT_CLIENT_REREGISTER) { 307 event->event == IB_EVENT_CLIENT_REREGISTER) {
307 work = kmalloc(sizeof *work, GFP_ATOMIC); 308 work = kmalloc(sizeof *work, GFP_ATOMIC);
308 if (work) { 309 if (work) {
309 INIT_WORK(&work->work, ib_cache_task, work); 310 INIT_WORK(&work->work, ib_cache_task);
310 work->device = event->device; 311 work->device = event->device;
311 work->port_num = event->element.port_num; 312 work->port_num = event->element.port_num;
312 schedule_work(&work->work); 313 schedule_work(&work->work);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e5dc4530808a..79c937bf6962 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@ struct cm_av {
101}; 101};
102 102
103struct cm_work { 103struct cm_work {
104 struct work_struct work; 104 struct delayed_work work;
105 struct list_head list; 105 struct list_head list;
106 struct cm_port *port; 106 struct cm_port *port;
107 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 107 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
@@ -161,7 +161,7 @@ struct cm_id_private {
161 atomic_t work_count; 161 atomic_t work_count;
162}; 162};
163 163
164static void cm_work_handler(void *data); 164static void cm_work_handler(struct work_struct *work);
165 165
166static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 166static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
167{ 167{
@@ -668,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
668 return ERR_PTR(-ENOMEM); 668 return ERR_PTR(-ENOMEM);
669 669
670 timewait_info->work.local_id = local_id; 670 timewait_info->work.local_id = local_id;
671 INIT_WORK(&timewait_info->work.work, cm_work_handler, 671 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
672 &timewait_info->work);
673 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; 672 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
674 return timewait_info; 673 return timewait_info;
675} 674}
@@ -2995,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
2995 } 2994 }
2996} 2995}
2997 2996
2998static void cm_work_handler(void *data) 2997static void cm_work_handler(struct work_struct *_work)
2999{ 2998{
3000 struct cm_work *work = data; 2999 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3001 int ret; 3000 int ret;
3002 3001
3003 switch (work->cm_event.event) { 3002 switch (work->cm_event.event) {
@@ -3087,12 +3086,12 @@ static int cm_establish(struct ib_cm_id *cm_id)
3087 * we need to find the cm_id once we're in the context of the 3086 * we need to find the cm_id once we're in the context of the
3088 * worker thread, rather than holding a reference on it. 3087 * worker thread, rather than holding a reference on it.
3089 */ 3088 */
3090 INIT_WORK(&work->work, cm_work_handler, work); 3089 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3091 work->local_id = cm_id->local_id; 3090 work->local_id = cm_id->local_id;
3092 work->remote_id = cm_id->remote_id; 3091 work->remote_id = cm_id->remote_id;
3093 work->mad_recv_wc = NULL; 3092 work->mad_recv_wc = NULL;
3094 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3093 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3095 queue_work(cm.wq, &work->work); 3094 queue_delayed_work(cm.wq, &work->work, 0);
3096out: 3095out:
3097 return ret; 3096 return ret;
3098} 3097}
@@ -3191,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3191 return; 3190 return;
3192 } 3191 }
3193 3192
3194 INIT_WORK(&work->work, cm_work_handler, work); 3193 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3195 work->cm_event.event = event; 3194 work->cm_event.event = event;
3196 work->mad_recv_wc = mad_recv_wc; 3195 work->mad_recv_wc = mad_recv_wc;
3197 work->port = (struct cm_port *)mad_agent->context; 3196 work->port = (struct cm_port *)mad_agent->context;
3198 queue_work(cm.wq, &work->work); 3197 queue_delayed_work(cm.wq, &work->work, 0);
3199} 3198}
3200 3199
3201static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3200static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cf48f2697434..985a6b564d8f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1340,9 +1340,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1340 return (id_priv->query_id < 0) ? id_priv->query_id : 0; 1340 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
1341} 1341}
1342 1342
1343static void cma_work_handler(void *data) 1343static void cma_work_handler(struct work_struct *_work)
1344{ 1344{
1345 struct cma_work *work = data; 1345 struct cma_work *work = container_of(_work, struct cma_work, work);
1346 struct rdma_id_private *id_priv = work->id; 1346 struct rdma_id_private *id_priv = work->id;
1347 int destroy = 0; 1347 int destroy = 0;
1348 1348
@@ -1373,7 +1373,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1373 return -ENOMEM; 1373 return -ENOMEM;
1374 1374
1375 work->id = id_priv; 1375 work->id = id_priv;
1376 INIT_WORK(&work->work, cma_work_handler, work); 1376 INIT_WORK(&work->work, cma_work_handler);
1377 work->old_state = CMA_ROUTE_QUERY; 1377 work->old_state = CMA_ROUTE_QUERY;
1378 work->new_state = CMA_ROUTE_RESOLVED; 1378 work->new_state = CMA_ROUTE_RESOLVED;
1379 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1379 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1430,7 +1430,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1430 return -ENOMEM; 1430 return -ENOMEM;
1431 1431
1432 work->id = id_priv; 1432 work->id = id_priv;
1433 INIT_WORK(&work->work, cma_work_handler, work); 1433 INIT_WORK(&work->work, cma_work_handler);
1434 work->old_state = CMA_ROUTE_QUERY; 1434 work->old_state = CMA_ROUTE_QUERY;
1435 work->new_state = CMA_ROUTE_RESOLVED; 1435 work->new_state = CMA_ROUTE_RESOLVED;
1436 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1436 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1583,7 +1583,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1583 } 1583 }
1584 1584
1585 work->id = id_priv; 1585 work->id = id_priv;
1586 INIT_WORK(&work->work, cma_work_handler, work); 1586 INIT_WORK(&work->work, cma_work_handler);
1587 work->old_state = CMA_ADDR_QUERY; 1587 work->old_state = CMA_ADDR_QUERY;
1588 work->new_state = CMA_ADDR_RESOLVED; 1588 work->new_state = CMA_ADDR_RESOLVED;
1589 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 1589 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index cf797d7aea09..1039ad57d53b 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -828,9 +828,9 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
828 * thread asleep on the destroy_comp list vs. an object destroyed 828 * thread asleep on the destroy_comp list vs. an object destroyed
829 * here synchronously when the last reference is removed. 829 * here synchronously when the last reference is removed.
830 */ 830 */
831static void cm_work_handler(void *arg) 831static void cm_work_handler(struct work_struct *_work)
832{ 832{
833 struct iwcm_work *work = arg; 833 struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
834 struct iw_cm_event levent; 834 struct iw_cm_event levent;
835 struct iwcm_id_private *cm_id_priv = work->cm_id; 835 struct iwcm_id_private *cm_id_priv = work->cm_id;
836 unsigned long flags; 836 unsigned long flags;
@@ -900,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
900 goto out; 900 goto out;
901 } 901 }
902 902
903 INIT_WORK(&work->work, cm_work_handler, work); 903 INIT_WORK(&work->work, cm_work_handler);
904 work->cm_id = cm_id_priv; 904 work->cm_id = cm_id_priv;
905 work->event = *iw_event; 905 work->event = *iw_event;
906 906
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 3f9c16232c4d..15f38d94b3a8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent(
65static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 65static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
66 struct ib_mad_private *mad); 66 struct ib_mad_private *mad);
67static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); 67static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
68static void timeout_sends(void *data); 68static void timeout_sends(struct work_struct *work);
69static void local_completions(void *data); 69static void local_completions(struct work_struct *work);
70static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 70static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
71 struct ib_mad_agent_private *agent_priv, 71 struct ib_mad_agent_private *agent_priv,
72 u8 mgmt_class); 72 u8 mgmt_class);
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
356 INIT_LIST_HEAD(&mad_agent_priv->wait_list); 356 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
357 INIT_LIST_HEAD(&mad_agent_priv->done_list); 357 INIT_LIST_HEAD(&mad_agent_priv->done_list);
358 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); 358 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
359 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); 359 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
360 INIT_LIST_HEAD(&mad_agent_priv->local_list); 360 INIT_LIST_HEAD(&mad_agent_priv->local_list);
361 INIT_WORK(&mad_agent_priv->local_work, local_completions, 361 INIT_WORK(&mad_agent_priv->local_work, local_completions);
362 mad_agent_priv);
363 atomic_set(&mad_agent_priv->refcount, 1); 362 atomic_set(&mad_agent_priv->refcount, 1);
364 init_completion(&mad_agent_priv->comp); 363 init_completion(&mad_agent_priv->comp);
365 364
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
2198/* 2197/*
2199 * IB MAD completion callback 2198 * IB MAD completion callback
2200 */ 2199 */
2201static void ib_mad_completion_handler(void *data) 2200static void ib_mad_completion_handler(struct work_struct *work)
2202{ 2201{
2203 struct ib_mad_port_private *port_priv; 2202 struct ib_mad_port_private *port_priv;
2204 struct ib_wc wc; 2203 struct ib_wc wc;
2205 2204
2206 port_priv = (struct ib_mad_port_private *)data; 2205 port_priv = container_of(work, struct ib_mad_port_private, work);
2207 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 2206 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2208 2207
2209 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { 2208 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2324} 2323}
2325EXPORT_SYMBOL(ib_cancel_mad); 2324EXPORT_SYMBOL(ib_cancel_mad);
2326 2325
2327static void local_completions(void *data) 2326static void local_completions(struct work_struct *work)
2328{ 2327{
2329 struct ib_mad_agent_private *mad_agent_priv; 2328 struct ib_mad_agent_private *mad_agent_priv;
2330 struct ib_mad_local_private *local; 2329 struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@ static void local_completions(void *data)
2334 struct ib_wc wc; 2333 struct ib_wc wc;
2335 struct ib_mad_send_wc mad_send_wc; 2334 struct ib_mad_send_wc mad_send_wc;
2336 2335
2337 mad_agent_priv = (struct ib_mad_agent_private *)data; 2336 mad_agent_priv =
2337 container_of(work, struct ib_mad_agent_private, local_work);
2338 2338
2339 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2339 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2340 while (!list_empty(&mad_agent_priv->local_list)) { 2340 while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2434 return ret; 2434 return ret;
2435} 2435}
2436 2436
2437static void timeout_sends(void *data) 2437static void timeout_sends(struct work_struct *work)
2438{ 2438{
2439 struct ib_mad_agent_private *mad_agent_priv; 2439 struct ib_mad_agent_private *mad_agent_priv;
2440 struct ib_mad_send_wr_private *mad_send_wr; 2440 struct ib_mad_send_wr_private *mad_send_wr;
2441 struct ib_mad_send_wc mad_send_wc; 2441 struct ib_mad_send_wc mad_send_wc;
2442 unsigned long flags, delay; 2442 unsigned long flags, delay;
2443 2443
2444 mad_agent_priv = (struct ib_mad_agent_private *)data; 2444 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2445 timed_work.work);
2445 mad_send_wc.vendor_err = 0; 2446 mad_send_wc.vendor_err = 0;
2446 2447
2447 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2448 spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
2799 ret = -ENOMEM; 2800 ret = -ENOMEM;
2800 goto error8; 2801 goto error8;
2801 } 2802 }
2802 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); 2803 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2803 2804
2804 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2805 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2805 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 2806 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d06b59083f6e..d5548e73e068 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -102,7 +102,7 @@ struct ib_mad_agent_private {
102 struct list_head send_list; 102 struct list_head send_list;
103 struct list_head wait_list; 103 struct list_head wait_list;
104 struct list_head done_list; 104 struct list_head done_list;
105 struct work_struct timed_work; 105 struct delayed_work timed_work;
106 unsigned long timeout; 106 unsigned long timeout;
107 struct list_head local_list; 107 struct list_head local_list;
108 struct work_struct local_work; 108 struct work_struct local_work;
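
The MAD agent keeps one delayed and one plain work item side by side, and the container_of() expression differs between them: the delayed case goes through the embedded .work member. A compact sketch of both forms, with made-up names:

#include <linux/workqueue.h>

/* Illustrative structure only. */
struct my_agent {
        struct delayed_work timed_work;         /* needs a timeout delay */
        struct work_struct  local_work;         /* run as soon as possible */
        unsigned int timeouts, completions;
};

static void my_timeout_sends(struct work_struct *work)
{
        /* delayed_work handler: container_of() via member.work */
        struct my_agent *agent =
                container_of(work, struct my_agent, timed_work.work);

        agent->timeouts++;
}

static void my_local_completions(struct work_struct *work)
{
        /* plain work_struct handler: container_of() on the member itself */
        struct my_agent *agent =
                container_of(work, struct my_agent, local_work);

        agent->completions++;
}

static void my_agent_init(struct my_agent *agent)
{
        INIT_DELAYED_WORK(&agent->timed_work, my_timeout_sends);
        INIT_WORK(&agent->local_work, my_local_completions);
}
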
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 1ef79d015a1e..3663fd7022be 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -45,8 +45,8 @@ enum rmpp_state {
45struct mad_rmpp_recv { 45struct mad_rmpp_recv {
46 struct ib_mad_agent_private *agent; 46 struct ib_mad_agent_private *agent;
47 struct list_head list; 47 struct list_head list;
48 struct work_struct timeout_work; 48 struct delayed_work timeout_work;
49 struct work_struct cleanup_work; 49 struct delayed_work cleanup_work;
50 struct completion comp; 50 struct completion comp;
51 enum rmpp_state state; 51 enum rmpp_state state;
52 spinlock_t lock; 52 spinlock_t lock;
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent,
233 } 233 }
234} 234}
235 235
236static void recv_timeout_handler(void *data) 236static void recv_timeout_handler(struct work_struct *work)
237{ 237{
238 struct mad_rmpp_recv *rmpp_recv = data; 238 struct mad_rmpp_recv *rmpp_recv =
239 container_of(work, struct mad_rmpp_recv, timeout_work.work);
239 struct ib_mad_recv_wc *rmpp_wc; 240 struct ib_mad_recv_wc *rmpp_wc;
240 unsigned long flags; 241 unsigned long flags;
241 242
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data)
254 ib_free_recv_mad(rmpp_wc); 255 ib_free_recv_mad(rmpp_wc);
255} 256}
256 257
257static void recv_cleanup_handler(void *data) 258static void recv_cleanup_handler(struct work_struct *work)
258{ 259{
259 struct mad_rmpp_recv *rmpp_recv = data; 260 struct mad_rmpp_recv *rmpp_recv =
261 container_of(work, struct mad_rmpp_recv, cleanup_work.work);
260 unsigned long flags; 262 unsigned long flags;
261 263
262 spin_lock_irqsave(&rmpp_recv->agent->lock, flags); 264 spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
285 287
286 rmpp_recv->agent = agent; 288 rmpp_recv->agent = agent;
287 init_completion(&rmpp_recv->comp); 289 init_completion(&rmpp_recv->comp);
288 INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); 290 INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
289 INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); 291 INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
290 spin_lock_init(&rmpp_recv->lock); 292 spin_lock_init(&rmpp_recv->lock);
291 rmpp_recv->state = RMPP_STATE_ACTIVE; 293 rmpp_recv->state = RMPP_STATE_ACTIVE;
292 atomic_set(&rmpp_recv->refcount, 1); 294 atomic_set(&rmpp_recv->refcount, 1);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1706d3c7e95e..e45afba75341 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref)
360 kfree(sm_ah); 360 kfree(sm_ah);
361} 361}
362 362
363static void update_sm_ah(void *port_ptr) 363static void update_sm_ah(struct work_struct *work)
364{ 364{
365 struct ib_sa_port *port = port_ptr; 365 struct ib_sa_port *port =
366 container_of(work, struct ib_sa_port, update_task);
366 struct ib_sa_sm_ah *new_ah, *old_ah; 367 struct ib_sa_sm_ah *new_ah, *old_ah;
367 struct ib_port_attr port_attr; 368 struct ib_port_attr port_attr;
368 struct ib_ah_attr ah_attr; 369 struct ib_ah_attr ah_attr;
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device)
992 if (IS_ERR(sa_dev->port[i].agent)) 993 if (IS_ERR(sa_dev->port[i].agent))
993 goto err; 994 goto err;
994 995
995 INIT_WORK(&sa_dev->port[i].update_task, 996 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
996 update_sm_ah, &sa_dev->port[i]);
997 } 997 }
998 998
999 ib_set_client_data(device, &sa_client, sa_dev); 999 ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
1010 goto err; 1010 goto err;
1011 1011
1012 for (i = 0; i <= e - s; ++i) 1012 for (i = 0; i <= e - s; ++i)
1013 update_sm_ah(&sa_dev->port[i]); 1013 update_sm_ah(&sa_dev->port[i].update_task);
1014 1014
1015 return; 1015 return;
1016 1016
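
sa_query.c also invokes its handler synchronously; since the prototype only accepts a work_struct *, a direct call now passes the address of the embedded item so container_of() still resolves to the right port. Sketch with hypothetical names:

#include <linux/workqueue.h>

struct my_port {
        unsigned int updates;
        struct work_struct update_task;
};

static void my_update_sm_ah(struct work_struct *work)
{
        struct my_port *port =
                container_of(work, struct my_port, update_task);

        port->updates++;        /* stand-in for refreshing the SM AH */
}

static void my_add_ports(struct my_port *ports, int nports)
{
        int i;

        for (i = 0; i < nports; i++)
                INIT_WORK(&ports[i].update_task, my_update_sm_ah);

        /* Synchronous call: hand over the embedded work item, not the
         * containing object. */
        for (i = 0; i < nports; i++)
                my_update_sm_ah(&ports[i].update_task);
}
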
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147dbeb42..db12cc0841df 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
179 up_write(&current->mm->mmap_sem); 179 up_write(&current->mm->mmap_sem);
180} 180}
181 181
182static void ib_umem_account(void *work_ptr) 182static void ib_umem_account(struct work_struct *_work)
183{ 183{
184 struct ib_umem_account_work *work = work_ptr; 184 struct ib_umem_account_work *work =
185 container_of(_work, struct ib_umem_account_work, work);
185 186
186 down_write(&work->mm->mmap_sem); 187 down_write(&work->mm->mmap_sem);
187 work->mm->locked_vm -= work->diff; 188 work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
216 return; 217 return;
217 } 218 }
218 219
219 INIT_WORK(&work->work, ib_umem_account, work); 220 INIT_WORK(&work->work, ib_umem_account);
220 work->mm = mm; 221 work->mm = mm;
221 work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; 222 work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
222 223
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 413754b1d8a2..8536aeb96af8 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -214,9 +214,10 @@ struct ipath_user_pages_work {
214 unsigned long num_pages; 214 unsigned long num_pages;
215}; 215};
216 216
217static void user_pages_account(void *ptr) 217static void user_pages_account(struct work_struct *_work)
218{ 218{
219 struct ipath_user_pages_work *work = ptr; 219 struct ipath_user_pages_work *work =
220 container_of(_work, struct ipath_user_pages_work, work);
220 221
221 down_write(&work->mm->mmap_sem); 222 down_write(&work->mm->mmap_sem);
222 work->mm->locked_vm -= work->num_pages; 223 work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
242 243
243 goto bail; 244 goto bail;
244 245
245 INIT_WORK(&work->work, user_pages_account, work); 246 INIT_WORK(&work->work, user_pages_account);
246 work->mm = mm; 247 work->mm = mm;
247 work->num_pages = num_pages; 248 work->num_pages = num_pages;
248 249
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cd044ea2dfa4..e948158a28d9 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -57,7 +57,7 @@ static int catas_reset_disable;
57module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); 57module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
58MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); 58MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
59 59
60static void catas_reset(void *work_ptr) 60static void catas_reset(struct work_struct *work)
61{ 61{
62 struct mthca_dev *dev, *tmpdev; 62 struct mthca_dev *dev, *tmpdev;
63 LIST_HEAD(tlist); 63 LIST_HEAD(tlist);
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
203 203
204int __init mthca_catas_init(void) 204int __init mthca_catas_init(void)
205{ 205{
206 INIT_WORK(&catas_work, catas_reset, NULL); 206 INIT_WORK(&catas_work, catas_reset);
207 207
208 catas_wq = create_singlethread_workqueue("mthca_catas"); 208 catas_wq = create_singlethread_workqueue("mthca_catas");
209 if (!catas_wq) 209 if (!catas_wq)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index f2b61851a49c..99547996aba2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -136,11 +136,11 @@ struct ipoib_dev_priv {
136 struct list_head multicast_list; 136 struct list_head multicast_list;
137 struct rb_root multicast_tree; 137 struct rb_root multicast_tree;
138 138
139 struct work_struct pkey_task; 139 struct delayed_work pkey_task;
140 struct work_struct mcast_task; 140 struct delayed_work mcast_task;
141 struct work_struct flush_task; 141 struct work_struct flush_task;
142 struct work_struct restart_task; 142 struct work_struct restart_task;
143 struct work_struct ah_reap_task; 143 struct delayed_work ah_reap_task;
144 144
145 struct ib_device *ca; 145 struct ib_device *ca;
146 u8 port; 146 u8 port;
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev);
254 254
255void ipoib_send(struct net_device *dev, struct sk_buff *skb, 255void ipoib_send(struct net_device *dev, struct sk_buff *skb,
256 struct ipoib_ah *address, u32 qpn); 256 struct ipoib_ah *address, u32 qpn);
257void ipoib_reap_ah(void *dev_ptr); 257void ipoib_reap_ah(struct work_struct *work);
258 258
259void ipoib_flush_paths(struct net_device *dev); 259void ipoib_flush_paths(struct net_device *dev);
260struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); 260struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
261 261
262int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 262int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
263void ipoib_ib_dev_flush(void *dev); 263void ipoib_ib_dev_flush(struct work_struct *work);
264void ipoib_ib_dev_cleanup(struct net_device *dev); 264void ipoib_ib_dev_cleanup(struct net_device *dev);
265 265
266int ipoib_ib_dev_open(struct net_device *dev); 266int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev);
271int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 271int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
272void ipoib_dev_cleanup(struct net_device *dev); 272void ipoib_dev_cleanup(struct net_device *dev);
273 273
274void ipoib_mcast_join_task(void *dev_ptr); 274void ipoib_mcast_join_task(struct work_struct *work);
275void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); 275void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
276 276
277void ipoib_mcast_restart_task(void *dev_ptr); 277void ipoib_mcast_restart_task(struct work_struct *work);
278int ipoib_mcast_start_thread(struct net_device *dev); 278int ipoib_mcast_start_thread(struct net_device *dev);
279int ipoib_mcast_stop_thread(struct net_device *dev, int flush); 279int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
280 280
@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler,
312int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); 312int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
313int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); 313int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
314 314
315void ipoib_pkey_poll(void *dev); 315void ipoib_pkey_poll(struct work_struct *work);
316int ipoib_pkey_dev_delay_open(struct net_device *dev); 316int ipoib_pkey_dev_delay_open(struct net_device *dev);
317 317
318#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 318#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8bf5e9ec7c95..f10fba5d3265 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -400,10 +400,11 @@ static void __ipoib_reap_ah(struct net_device *dev)
400 spin_unlock_irq(&priv->tx_lock); 400 spin_unlock_irq(&priv->tx_lock);
401} 401}
402 402
403void ipoib_reap_ah(void *dev_ptr) 403void ipoib_reap_ah(struct work_struct *work)
404{ 404{
405 struct net_device *dev = dev_ptr; 405 struct ipoib_dev_priv *priv =
406 struct ipoib_dev_priv *priv = netdev_priv(dev); 406 container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
407 struct net_device *dev = priv->dev;
407 408
408 __ipoib_reap_ah(dev); 409 __ipoib_reap_ah(dev);
409 410
@@ -613,10 +614,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
613 return 0; 614 return 0;
614} 615}
615 616
616void ipoib_ib_dev_flush(void *_dev) 617void ipoib_ib_dev_flush(struct work_struct *work)
617{ 618{
618 struct net_device *dev = (struct net_device *)_dev; 619 struct ipoib_dev_priv *cpriv, *priv =
619 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv; 620 container_of(work, struct ipoib_dev_priv, flush_task);
621 struct net_device *dev = priv->dev;
620 622
621 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { 623 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
622 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); 624 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +640,14 @@ void ipoib_ib_dev_flush(void *_dev)
638 */ 640 */
639 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { 641 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
640 ipoib_ib_dev_up(dev); 642 ipoib_ib_dev_up(dev);
641 ipoib_mcast_restart_task(dev); 643 ipoib_mcast_restart_task(&priv->restart_task);
642 } 644 }
643 645
644 mutex_lock(&priv->vlan_mutex); 646 mutex_lock(&priv->vlan_mutex);
645 647
646 /* Flush any child interfaces too */ 648 /* Flush any child interfaces too */
647 list_for_each_entry(cpriv, &priv->child_intfs, list) 649 list_for_each_entry(cpriv, &priv->child_intfs, list)
648 ipoib_ib_dev_flush(cpriv->dev); 650 ipoib_ib_dev_flush(&cpriv->flush_task);
649 651
650 mutex_unlock(&priv->vlan_mutex); 652 mutex_unlock(&priv->vlan_mutex);
651} 653}
@@ -672,10 +674,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
672 * change async notification is available. 674 * change async notification is available.
673 */ 675 */
674 676
675void ipoib_pkey_poll(void *dev_ptr) 677void ipoib_pkey_poll(struct work_struct *work)
676{ 678{
677 struct net_device *dev = dev_ptr; 679 struct ipoib_dev_priv *priv =
678 struct ipoib_dev_priv *priv = netdev_priv(dev); 680 container_of(work, struct ipoib_dev_priv, pkey_task.work);
681 struct net_device *dev = priv->dev;
679 682
680 ipoib_pkey_dev_check_presence(dev); 683 ipoib_pkey_dev_check_presence(dev);
681 684
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5ba3154320b4..c09280243726 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -940,11 +940,11 @@ static void ipoib_setup(struct net_device *dev)
940 INIT_LIST_HEAD(&priv->dead_ahs); 940 INIT_LIST_HEAD(&priv->dead_ahs);
941 INIT_LIST_HEAD(&priv->multicast_list); 941 INIT_LIST_HEAD(&priv->multicast_list);
942 942
943 INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); 943 INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
944 INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); 944 INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
945 INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); 945 INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
946 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); 946 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
947 INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); 947 INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
948} 948}
949 949
950struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) 950struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d282d65e3ee0..b04b72ca32ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status,
399 mcast->backoff = 1; 399 mcast->backoff = 1;
400 mutex_lock(&mcast_mutex); 400 mutex_lock(&mcast_mutex);
401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
402 queue_work(ipoib_workqueue, &priv->mcast_task); 402 queue_delayed_work(ipoib_workqueue,
403 &priv->mcast_task, 0);
403 mutex_unlock(&mcast_mutex); 404 mutex_unlock(&mcast_mutex);
404 complete(&mcast->done); 405 complete(&mcast->done);
405 return; 406 return;
@@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status,
435 436
436 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { 437 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
437 if (status == -ETIMEDOUT) 438 if (status == -ETIMEDOUT)
438 queue_work(ipoib_workqueue, &priv->mcast_task); 439 queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
440 0);
439 else 441 else
440 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 442 queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
441 mcast->backoff * HZ); 443 mcast->backoff * HZ);
@@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
517 mcast->query_id = ret; 519 mcast->query_id = ret;
518} 520}
519 521
520void ipoib_mcast_join_task(void *dev_ptr) 522void ipoib_mcast_join_task(struct work_struct *work)
521{ 523{
522 struct net_device *dev = dev_ptr; 524 struct ipoib_dev_priv *priv =
523 struct ipoib_dev_priv *priv = netdev_priv(dev); 525 container_of(work, struct ipoib_dev_priv, mcast_task.work);
526 struct net_device *dev = priv->dev;
524 527
525 if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) 528 if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
526 return; 529 return;
@@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
610 613
611 mutex_lock(&mcast_mutex); 614 mutex_lock(&mcast_mutex);
612 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 615 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
613 queue_work(ipoib_workqueue, &priv->mcast_task); 616 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
614 mutex_unlock(&mcast_mutex); 617 mutex_unlock(&mcast_mutex);
615 618
616 spin_lock_irq(&priv->lock); 619 spin_lock_irq(&priv->lock);
@@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
818 } 821 }
819} 822}
820 823
821void ipoib_mcast_restart_task(void *dev_ptr) 824void ipoib_mcast_restart_task(struct work_struct *work)
822{ 825{
823 struct net_device *dev = dev_ptr; 826 struct ipoib_dev_priv *priv =
824 struct ipoib_dev_priv *priv = netdev_priv(dev); 827 container_of(work, struct ipoib_dev_priv, restart_task);
828 struct net_device *dev = priv->dev;
825 struct dev_mc_list *mclist; 829 struct dev_mc_list *mclist;
826 struct ipoib_mcast *mcast, *tmcast; 830 struct ipoib_mcast *mcast, *tmcast;
827 LIST_HEAD(remove_list); 831 LIST_HEAD(remove_list);
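
The IPoIB handlers used to be given the net_device directly; after the conversion they recover their private structure and read a stored back-pointer instead, and the members split into delayed_work and plain work_struct depending on whether a delay is ever used. A sketch of that arrangement (my_priv and its fields are invented):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
        struct net_device *dev;         /* back-pointer kept for handlers */
        struct delayed_work mcast_task; /* rescheduled with a backoff */
        struct work_struct  flush_task; /* always queued immediately */
        unsigned int flushes;
};

static void my_mcast_join_task(struct work_struct *work)
{
        struct my_priv *priv =
                container_of(work, struct my_priv, mcast_task.work);
        struct net_device *dev = priv->dev;     /* was the old "data" arg */

        if (!netif_running(dev))
                return;
        /* ... join the next multicast group here ... */
}

static void my_dev_flush(struct work_struct *work)
{
        struct my_priv *priv =
                container_of(work, struct my_priv, flush_task);

        priv->flushes++;        /* stand-in for the real flush work */
}
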
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 18a000034996..693b77002897 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,7 @@
48 48
49static void iser_cq_tasklet_fn(unsigned long data); 49static void iser_cq_tasklet_fn(unsigned long data);
50static void iser_cq_callback(struct ib_cq *cq, void *cq_context); 50static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
51static void iser_comp_error_worker(void *data); 51static void iser_comp_error_worker(struct work_struct *work);
52 52
53static void iser_cq_event_callback(struct ib_event *cause, void *context) 53static void iser_cq_event_callback(struct ib_event *cause, void *context)
54{ 54{
@@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn)
480 init_waitqueue_head(&ib_conn->wait); 480 init_waitqueue_head(&ib_conn->wait);
481 atomic_set(&ib_conn->post_recv_buf_count, 0); 481 atomic_set(&ib_conn->post_recv_buf_count, 0);
482 atomic_set(&ib_conn->post_send_buf_count, 0); 482 atomic_set(&ib_conn->post_send_buf_count, 0);
483 INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker, 483 INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
484 ib_conn);
485 INIT_LIST_HEAD(&ib_conn->conn_list); 484 INIT_LIST_HEAD(&ib_conn->conn_list);
486 spin_lock_init(&ib_conn->lock); 485 spin_lock_init(&ib_conn->lock);
487 486
@@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc)
754 return ret_val; 753 return ret_val;
755} 754}
756 755
757static void iser_comp_error_worker(void *data) 756static void iser_comp_error_worker(struct work_struct *work)
758{ 757{
759 struct iser_conn *ib_conn = data; 758 struct iser_conn *ib_conn =
759 container_of(work, struct iser_conn, comperror_work);
760 760
761 /* getting here when the state is UP means that the conn is being * 761 /* getting here when the state is UP means that the conn is being *
762 * terminated asynchronously from the iSCSI layer's perspective. */ 762 * terminated asynchronously from the iSCSI layer's perspective. */
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 64ab5fc7cca3..a6289595557b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -390,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
390 wait_for_completion(&target->done); 390 wait_for_completion(&target->done);
391} 391}
392 392
393static void srp_remove_work(void *target_ptr) 393static void srp_remove_work(struct work_struct *work)
394{ 394{
395 struct srp_target_port *target = target_ptr; 395 struct srp_target_port *target =
396 container_of(work, struct srp_target_port, work);
396 397
397 spin_lock_irq(target->scsi_host->host_lock); 398 spin_lock_irq(target->scsi_host->host_lock);
398 if (target->state != SRP_TARGET_DEAD) { 399 if (target->state != SRP_TARGET_DEAD) {
@@ -575,7 +576,7 @@ err:
575 spin_lock_irq(target->scsi_host->host_lock); 576 spin_lock_irq(target->scsi_host->host_lock);
576 if (target->state == SRP_TARGET_CONNECTING) { 577 if (target->state == SRP_TARGET_CONNECTING) {
577 target->state = SRP_TARGET_DEAD; 578 target->state = SRP_TARGET_DEAD;
578 INIT_WORK(&target->work, srp_remove_work, target); 579 INIT_WORK(&target->work, srp_remove_work);
579 schedule_work(&target->work); 580 schedule_work(&target->work);
580 } 581 }
581 spin_unlock_irq(target->scsi_host->host_lock); 582 spin_unlock_irq(target->scsi_host->host_lock);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index cbb93669d1ce..8451b29a3db5 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd)
567 * interrupt context. 567 * interrupt context.
568 */ 568 */
569 569
570static void atkbd_event_work(void *data) 570static void atkbd_event_work(struct work_struct *work)
571{ 571{
572 struct atkbd *atkbd = data; 572 struct atkbd *atkbd = container_of(work, struct atkbd, event_work);
573 573
574 mutex_lock(&atkbd->event_mutex); 574 mutex_lock(&atkbd->event_mutex);
575 575
@@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
943 943
944 atkbd->dev = dev; 944 atkbd->dev = dev;
945 ps2_init(&atkbd->ps2dev, serio); 945 ps2_init(&atkbd->ps2dev, serio);
946 INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd); 946 INIT_WORK(&atkbd->event_work, atkbd_event_work);
947 mutex_init(&atkbd->event_mutex); 947 mutex_init(&atkbd->event_mutex);
948 948
949 switch (serio->id.type) { 949 switch (serio->id.type) {
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 979b93e33da7..b7f049b45b6b 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -572,9 +572,9 @@ lkkbd_event (struct input_dev *dev, unsigned int type, unsigned int code,
572 * were in. 572 * were in.
573 */ 573 */
574static void 574static void
575lkkbd_reinit (void *data) 575lkkbd_reinit (struct work_struct *work)
576{ 576{
577 struct lkkbd *lk = data; 577 struct lkkbd *lk = container_of(work, struct lkkbd, tq);
578 int division; 578 int division;
579 unsigned char leds_on = 0; 579 unsigned char leds_on = 0;
580 unsigned char leds_off = 0; 580 unsigned char leds_off = 0;
@@ -651,7 +651,7 @@ lkkbd_connect (struct serio *serio, struct serio_driver *drv)
651 651
652 lk->serio = serio; 652 lk->serio = serio;
653 lk->dev = input_dev; 653 lk->dev = input_dev;
654 INIT_WORK (&lk->tq, lkkbd_reinit, lk); 654 INIT_WORK (&lk->tq, lkkbd_reinit);
655 lk->bell_volume = bell_volume; 655 lk->bell_volume = bell_volume;
656 lk->keyclick_volume = keyclick_volume; 656 lk->keyclick_volume = keyclick_volume;
657 lk->ctrlclick_volume = ctrlclick_volume; 657 lk->ctrlclick_volume = ctrlclick_volume;
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index cac4781103c3..6cd887c5eb0a 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -208,9 +208,9 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
208 * were in. 208 * were in.
209 */ 209 */
210 210
211static void sunkbd_reinit(void *data) 211static void sunkbd_reinit(struct work_struct *work)
212{ 212{
213 struct sunkbd *sunkbd = data; 213 struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
214 214
215 wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); 215 wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
216 216
@@ -248,7 +248,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv)
248 sunkbd->serio = serio; 248 sunkbd->serio = serio;
249 sunkbd->dev = input_dev; 249 sunkbd->dev = input_dev;
250 init_waitqueue_head(&sunkbd->wait); 250 init_waitqueue_head(&sunkbd->wait);
251 INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd); 251 INIT_WORK(&sunkbd->tq, sunkbd_reinit);
252 snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys); 252 snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);
253 253
254 serio_set_drvdata(serio, sunkbd); 254 serio_set_drvdata(serio, sunkbd);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 6f9b2c7cc9c2..52bb2226ce2f 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -888,9 +888,10 @@ static int psmouse_poll(struct psmouse *psmouse)
888 * psmouse_resync() attempts to re-validate current protocol. 888 * psmouse_resync() attempts to re-validate current protocol.
889 */ 889 */
890 890
891static void psmouse_resync(void *p) 891static void psmouse_resync(struct work_struct *work)
892{ 892{
893 struct psmouse *psmouse = p, *parent = NULL; 893 struct psmouse *parent = NULL, *psmouse =
894 container_of(work, struct psmouse, resync_work);
894 struct serio *serio = psmouse->ps2dev.serio; 895 struct serio *serio = psmouse->ps2dev.serio;
895 psmouse_ret_t rc = PSMOUSE_GOOD_DATA; 896 psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
896 int failed = 0, enabled = 0; 897 int failed = 0, enabled = 0;
@@ -1121,7 +1122,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
1121 goto out; 1122 goto out;
1122 1123
1123 ps2_init(&psmouse->ps2dev, serio); 1124 ps2_init(&psmouse->ps2dev, serio);
1124 INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse); 1125 INIT_WORK(&psmouse->resync_work, psmouse_resync);
1125 psmouse->dev = input_dev; 1126 psmouse->dev = input_dev;
1126 snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); 1127 snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
1127 1128
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index e5b1b60757bb..b3e84d3bb7f7 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command);
251 * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.) 251 * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
252 */ 252 */
253 253
254static void ps2_execute_scheduled_command(void *data) 254static void ps2_execute_scheduled_command(struct work_struct *work)
255{ 255{
256 struct ps2work *ps2work = data; 256 struct ps2work *ps2work = container_of(work, struct ps2work, work);
257 257
258 ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command); 258 ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
259 kfree(ps2work); 259 kfree(ps2work);
@@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman
278 ps2work->ps2dev = ps2dev; 278 ps2work->ps2dev = ps2dev;
279 ps2work->command = command; 279 ps2work->command = command;
280 memcpy(ps2work->param, param, send); 280 memcpy(ps2work->param, param, send);
281 INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work); 281 INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);
282 282
283 if (!schedule_work(&ps2work->work)) { 283 if (!schedule_work(&ps2work->work)) {
284 kfree(ps2work); 284 kfree(ps2work);
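
libps2.c is the one-shot case: the work item sits inside a dynamically allocated request and the handler frees the whole allocation when it has run. A sketch with invented names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical one-shot request carrying its own work item. */
struct my_cmd {
        struct work_struct work;
        int command;
};

static void my_execute_cmd(struct work_struct *work)
{
        struct my_cmd *cmd = container_of(work, struct my_cmd, work);

        /* ... perform cmd->command ... */
        kfree(cmd);             /* the one-shot item frees its container */
}

static int my_schedule_cmd(int command, gfp_t gfp)
{
        struct my_cmd *cmd = kmalloc(sizeof(*cmd), gfp);

        if (!cmd)
                return -ENOMEM;

        cmd->command = command;
        INIT_WORK(&cmd->work, my_execute_cmd);

        /* Defensive check mirroring the driver above: free the request
         * if it could not be queued. */
        if (!schedule_work(&cmd->work)) {
                kfree(cmd);
                return -EBUSY;
        }
        return 0;
}
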
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index 6ae6eb322111..946c38cf6f8a 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -627,8 +627,10 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
627} 627}
628 628
629void 629void
630actcapi_dispatch(act2000_card *card) 630actcapi_dispatch(struct work_struct *work)
631{ 631{
632 struct act2000_card *card =
633 container_of(work, struct act2000_card, rcv_tq);
632 struct sk_buff *skb; 634 struct sk_buff *skb;
633 actcapi_msg *msg; 635 actcapi_msg *msg;
634 __u16 ccmd; 636 __u16 ccmd;
diff --git a/drivers/isdn/act2000/capi.h b/drivers/isdn/act2000/capi.h
index 49f453c53c64..e55f6a931f66 100644
--- a/drivers/isdn/act2000/capi.h
+++ b/drivers/isdn/act2000/capi.h
@@ -356,7 +356,7 @@ extern int actcapi_connect_req(act2000_card *, act2000_chan *, char *, char, int
356extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *); 356extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *);
357extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *); 357extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *);
358extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8); 358extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8);
359extern void actcapi_dispatch(act2000_card *); 359extern void actcapi_dispatch(struct work_struct *);
360#ifdef DEBUG_MSG 360#ifdef DEBUG_MSG
361extern void actcapi_debug_msg(struct sk_buff *skb, int); 361extern void actcapi_debug_msg(struct sk_buff *skb, int);
362#else 362#else
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index d89dcde4eade..90593e2ef872 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -192,8 +192,11 @@ act2000_set_msn(act2000_card *card, char *eazmsn)
192} 192}
193 193
194static void 194static void
195act2000_transmit(struct act2000_card *card) 195act2000_transmit(struct work_struct *work)
196{ 196{
197 struct act2000_card *card =
198 container_of(work, struct act2000_card, snd_tq);
199
197 switch (card->bus) { 200 switch (card->bus) {
198 case ACT2000_BUS_ISA: 201 case ACT2000_BUS_ISA:
199 act2000_isa_send(card); 202 act2000_isa_send(card);
@@ -207,8 +210,11 @@ act2000_transmit(struct act2000_card *card)
207} 210}
208 211
209static void 212static void
210act2000_receive(struct act2000_card *card) 213act2000_receive(struct work_struct *work)
211{ 214{
215 struct act2000_card *card =
216 container_of(work, struct act2000_card, poll_tq);
217
212 switch (card->bus) { 218 switch (card->bus) {
213 case ACT2000_BUS_ISA: 219 case ACT2000_BUS_ISA:
214 act2000_isa_receive(card); 220 act2000_isa_receive(card);
@@ -227,7 +233,7 @@ act2000_poll(unsigned long data)
227 act2000_card * card = (act2000_card *)data; 233 act2000_card * card = (act2000_card *)data;
228 unsigned long flags; 234 unsigned long flags;
229 235
230 act2000_receive(card); 236 act2000_receive(&card->poll_tq);
231 spin_lock_irqsave(&card->lock, flags); 237 spin_lock_irqsave(&card->lock, flags);
232 mod_timer(&card->ptimer, jiffies+3); 238 mod_timer(&card->ptimer, jiffies+3);
233 spin_unlock_irqrestore(&card->lock, flags); 239 spin_unlock_irqrestore(&card->lock, flags);
@@ -578,9 +584,9 @@ act2000_alloccard(int bus, int port, int irq, char *id)
578 skb_queue_head_init(&card->sndq); 584 skb_queue_head_init(&card->sndq);
579 skb_queue_head_init(&card->rcvq); 585 skb_queue_head_init(&card->rcvq);
580 skb_queue_head_init(&card->ackq); 586 skb_queue_head_init(&card->ackq);
581 INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card); 587 INIT_WORK(&card->snd_tq, act2000_transmit);
582 INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card); 588 INIT_WORK(&card->rcv_tq, actcapi_dispatch);
583 INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card); 589 INIT_WORK(&card->poll_tq, act2000_receive);
584 init_timer(&card->ptimer); 590 init_timer(&card->ptimer);
585 card->interface.owner = THIS_MODULE; 591 card->interface.owner = THIS_MODULE;
586 card->interface.channels = ACT2000_BCH; 592 card->interface.channels = ACT2000_BCH;
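
The act2000 hunks also drop the old (void *)(void *) casts that forced mismatched handlers into INIT_WORK(); with the work_struct-based prototype the compiler can check the signature. Illustrative sketch:

#include <linux/workqueue.h>

struct my_card {
        unsigned int tx_count;
        struct work_struct snd_tq;
};

/* Type-checked handler - no cast needed at the INIT_WORK() site. */
static void my_transmit(struct work_struct *work)
{
        struct my_card *card = container_of(work, struct my_card, snd_tq);

        card->tx_count++;       /* stand-in for the bus-specific send */
}

static void my_card_init(struct my_card *card)
{
        INIT_WORK(&card->snd_tq, my_transmit);
}
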
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 8c4fcb9027b3..783a25526315 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -208,9 +208,10 @@ static void notify_down(u32 contr)
208 } 208 }
209} 209}
210 210
211static void notify_handler(void *data) 211static void notify_handler(struct work_struct *work)
212{ 212{
213 struct capi_notifier *np = data; 213 struct capi_notifier *np =
214 container_of(work, struct capi_notifier, work);
214 215
215 switch (np->cmd) { 216 switch (np->cmd) {
216 case KCI_CONTRUP: 217 case KCI_CONTRUP:
@@ -235,7 +236,7 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci)
235 if (!np) 236 if (!np)
236 return -ENOMEM; 237 return -ENOMEM;
237 238
238 INIT_WORK(&np->work, notify_handler, np); 239 INIT_WORK(&np->work, notify_handler);
239 np->cmd = cmd; 240 np->cmd = cmd;
240 np->controller = controller; 241 np->controller = controller;
241 np->applid = applid; 242 np->applid = applid;
@@ -248,10 +249,11 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci)
248 249
249/* -------- Receiver ------------------------------------------ */ 250/* -------- Receiver ------------------------------------------ */
250 251
251static void recv_handler(void *_ap) 252static void recv_handler(struct work_struct *work)
252{ 253{
253 struct sk_buff *skb; 254 struct sk_buff *skb;
254 struct capi20_appl *ap = (struct capi20_appl *) _ap; 255 struct capi20_appl *ap =
256 container_of(work, struct capi20_appl, recv_work);
255 257
256 if ((!ap) || (ap->release_in_progress)) 258 if ((!ap) || (ap->release_in_progress))
257 return; 259 return;
@@ -527,7 +529,7 @@ u16 capi20_register(struct capi20_appl *ap)
527 ap->callback = NULL; 529 ap->callback = NULL;
528 init_MUTEX(&ap->recv_sem); 530 init_MUTEX(&ap->recv_sem);
529 skb_queue_head_init(&ap->recv_queue); 531 skb_queue_head_init(&ap->recv_queue);
530 INIT_WORK(&ap->recv_work, recv_handler, (void *)ap); 532 INIT_WORK(&ap->recv_work, recv_handler);
531 ap->release_in_progress = 0; 533 ap->release_in_progress = 0;
532 534
533 write_unlock_irqrestore(&application_lock, flags); 535 write_unlock_irqrestore(&application_lock, flags);
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index bec59010bc66..3b19caeba258 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -232,9 +232,10 @@ Amd7930_new_ph(struct IsdnCardState *cs)
232 232
233 233
234static void 234static void
235Amd7930_bh(struct IsdnCardState *cs) 235Amd7930_bh(struct work_struct *work)
236{ 236{
237 237 struct IsdnCardState *cs =
238 container_of(work, struct IsdnCardState, tqueue);
238 struct PStack *stptr; 239 struct PStack *stptr;
239 240
240 if (!cs) 241 if (!cs)
@@ -789,7 +790,7 @@ Amd7930_init(struct IsdnCardState *cs)
789void __devinit 790void __devinit
790setup_Amd7930(struct IsdnCardState *cs) 791setup_Amd7930(struct IsdnCardState *cs)
791{ 792{
792 INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs); 793 INIT_WORK(&cs->tqueue, Amd7930_bh);
793 cs->dbusytimer.function = (void *) dbusy_timer_handler; 794 cs->dbusytimer.function = (void *) dbusy_timer_handler;
794 cs->dbusytimer.data = (long) cs; 795 cs->dbusytimer.data = (long) cs;
795 init_timer(&cs->dbusytimer); 796 init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 785b08554fca..cede72cdbb31 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1137,7 +1137,6 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
1137 cs->tx_skb = NULL; 1137 cs->tx_skb = NULL;
1138 cs->tx_cnt = 0; 1138 cs->tx_cnt = 0;
1139 cs->event = 0; 1139 cs->event = 0;
1140 cs->tqueue.data = cs;
1141 1140
1142 skb_queue_head_init(&cs->rq); 1141 skb_queue_head_init(&cs->rq);
1143 skb_queue_head_init(&cs->sq); 1142 skb_queue_head_init(&cs->sq);
@@ -1554,7 +1553,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg);
1554static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); 1553static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
1555static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); 1554static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
1556static void hisax_bc_close(struct BCState *bcs); 1555static void hisax_bc_close(struct BCState *bcs);
1557static void hisax_bh(struct IsdnCardState *cs); 1556static void hisax_bh(struct work_struct *work);
1558static void EChannel_proc_rcv(struct hisax_d_if *d_if); 1557static void EChannel_proc_rcv(struct hisax_d_if *d_if);
1559 1558
1560int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], 1559int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
@@ -1586,7 +1585,7 @@ int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
1586 hisax_d_if->cs = cs; 1585 hisax_d_if->cs = cs;
1587 cs->hw.hisax_d_if = hisax_d_if; 1586 cs->hw.hisax_d_if = hisax_d_if;
1588 cs->cardmsg = hisax_cardmsg; 1587 cs->cardmsg = hisax_cardmsg;
1589 INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs); 1588 INIT_WORK(&cs->tqueue, hisax_bh);
1590 cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; 1589 cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
1591 for (i = 0; i < 2; i++) { 1590 for (i = 0; i < 2; i++) {
1592 cs->bcs[i].BC_SetStack = hisax_bc_setstack; 1591 cs->bcs[i].BC_SetStack = hisax_bc_setstack;
@@ -1618,8 +1617,10 @@ static void hisax_sched_event(struct IsdnCardState *cs, int event)
1618 schedule_work(&cs->tqueue); 1617 schedule_work(&cs->tqueue);
1619} 1618}
1620 1619
1621static void hisax_bh(struct IsdnCardState *cs) 1620static void hisax_bh(struct work_struct *work)
1622{ 1621{
1622 struct IsdnCardState *cs =
1623 container_of(work, struct IsdnCardState, tqueue);
1623 struct PStack *st; 1624 struct PStack *st;
1624 int pr; 1625 int pr;
1625 1626
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index d852c9d998b2..de9b1a4d6bac 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1083,8 +1083,9 @@ tx_b_frame(struct hfc4s8s_btype *bch)
1083/* bottom half handler for interrupt */ 1083/* bottom half handler for interrupt */
1084/*************************************/ 1084/*************************************/
1085static void 1085static void
1086hfc4s8s_bh(hfc4s8s_hw * hw) 1086hfc4s8s_bh(struct work_struct *work)
1087{ 1087{
1088 hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
1088 u_char b; 1089 u_char b;
1089 struct hfc4s8s_l1 *l1p; 1090 struct hfc4s8s_l1 *l1p;
1090 volatile u_char *fifo_stat; 1091 volatile u_char *fifo_stat;
@@ -1550,7 +1551,7 @@ setup_instance(hfc4s8s_hw * hw)
1550 goto out; 1551 goto out;
1551 } 1552 }
1552 1553
1553 INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw); 1554 INIT_WORK(&hw->tqueue, hfc4s8s_bh);
1554 1555
1555 if (request_irq 1556 if (request_irq
1556 (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) { 1557 (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 6360e8214720..8d9864453a23 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -549,10 +549,11 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
549} 549}
550 550
551static void 551static void
552hfcd_bh(struct IsdnCardState *cs) 552hfcd_bh(struct work_struct *work)
553{ 553{
554 if (!cs) 554 struct IsdnCardState *cs =
555 return; 555 container_of(work, struct IsdnCardState, tqueue);
556
556 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { 557 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
557 switch (cs->dc.hfcd.ph_state) { 558 switch (cs->dc.hfcd.ph_state) {
558 case (0): 559 case (0):
@@ -1072,5 +1073,5 @@ set_cs_func(struct IsdnCardState *cs)
1072 cs->dbusytimer.function = (void *) hfc_dbusy_timer; 1073 cs->dbusytimer.function = (void *) hfc_dbusy_timer;
1073 cs->dbusytimer.data = (long) cs; 1074 cs->dbusytimer.data = (long) cs;
1074 init_timer(&cs->dbusytimer); 1075 init_timer(&cs->dbusytimer);
1075 INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs); 1076 INIT_WORK(&cs->tqueue, hfcd_bh);
1076} 1077}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 93f60b563515..5db0a85b827f 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1506,8 +1506,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
1506/* handle L1 state changes */ 1506/* handle L1 state changes */
1507/***************************/ 1507/***************************/
1508static void 1508static void
1509hfcpci_bh(struct IsdnCardState *cs) 1509hfcpci_bh(struct work_struct *work)
1510{ 1510{
1511 struct IsdnCardState *cs =
1512 container_of(work, struct IsdnCardState, tqueue);
1511 u_long flags; 1513 u_long flags;
1512// struct PStack *stptr; 1514// struct PStack *stptr;
1513 1515
@@ -1722,7 +1724,7 @@ setup_hfcpci(struct IsdnCard *card)
1722 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); 1724 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1723 /* At this point the needed PCI config is done */ 1725 /* At this point the needed PCI config is done */
1724 /* fifos are still not enabled */ 1726 /* fifos are still not enabled */
1725 INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs); 1727 INIT_WORK(&cs->tqueue, hfcpci_bh);
1726 cs->setstack_d = setstack_hfcpci; 1728 cs->setstack_d = setstack_hfcpci;
1727 cs->BC_Send_Data = &hfcpci_send_data; 1729 cs->BC_Send_Data = &hfcpci_send_data;
1728 cs->readisac = NULL; 1730 cs->readisac = NULL;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 954d1536db1f..4fd09d21a27f 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1251,8 +1251,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
1251/* handle L1 state changes */ 1251/* handle L1 state changes */
1252/***************************/ 1252/***************************/
1253static void 1253static void
1254hfcsx_bh(struct IsdnCardState *cs) 1254hfcsx_bh(struct work_struct *work)
1255{ 1255{
1256 struct IsdnCardState *cs =
1257 container_of(work, struct IsdnCardState, tqueue);
1256 u_long flags; 1258 u_long flags;
1257 1259
1258 if (!cs) 1260 if (!cs)
@@ -1499,7 +1501,7 @@ setup_hfcsx(struct IsdnCard *card)
1499 cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; 1501 cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
1500 cs->dbusytimer.data = (long) cs; 1502 cs->dbusytimer.data = (long) cs;
1501 init_timer(&cs->dbusytimer); 1503 init_timer(&cs->dbusytimer);
1502 INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs); 1504 INIT_WORK(&cs->tqueue, hfcsx_bh);
1503 cs->readisac = NULL; 1505 cs->readisac = NULL;
1504 cs->writeisac = NULL; 1506 cs->writeisac = NULL;
1505 cs->readisacfifo = NULL; 1507 cs->readisacfifo = NULL;
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index da706925d54d..682cac32f259 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -77,8 +77,10 @@ icc_new_ph(struct IsdnCardState *cs)
77} 77}
78 78
79static void 79static void
80icc_bh(struct IsdnCardState *cs) 80icc_bh(struct work_struct *work)
81{ 81{
82 struct IsdnCardState *cs =
83 container_of(work, struct IsdnCardState, tqueue);
82 struct PStack *stptr; 84 struct PStack *stptr;
83 85
84 if (!cs) 86 if (!cs)
@@ -674,7 +676,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs)
674void __devinit 676void __devinit
675setup_icc(struct IsdnCardState *cs) 677setup_icc(struct IsdnCardState *cs)
676{ 678{
677 INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs); 679 INIT_WORK(&cs->tqueue, icc_bh);
678 cs->dbusytimer.function = (void *) dbusy_timer_handler; 680 cs->dbusytimer.function = (void *) dbusy_timer_handler;
679 cs->dbusytimer.data = (long) cs; 681 cs->dbusytimer.data = (long) cs;
680 init_timer(&cs->dbusytimer); 682 init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 282f349408bc..4e9f23803dae 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -81,8 +81,10 @@ isac_new_ph(struct IsdnCardState *cs)
81} 81}
82 82
83static void 83static void
84isac_bh(struct IsdnCardState *cs) 84isac_bh(struct work_struct *work)
85{ 85{
86 struct IsdnCardState *cs =
87 container_of(work, struct IsdnCardState, tqueue);
86 struct PStack *stptr; 88 struct PStack *stptr;
87 89
88 if (!cs) 90 if (!cs)
@@ -674,7 +676,7 @@ clear_pending_isac_ints(struct IsdnCardState *cs)
674void __devinit 676void __devinit
675setup_isac(struct IsdnCardState *cs) 677setup_isac(struct IsdnCardState *cs)
676{ 678{
677 INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs); 679 INIT_WORK(&cs->tqueue, isac_bh);
678 cs->dbusytimer.function = (void *) dbusy_timer_handler; 680 cs->dbusytimer.function = (void *) dbusy_timer_handler;
679 cs->dbusytimer.data = (long) cs; 681 cs->dbusytimer.data = (long) cs;
680 init_timer(&cs->dbusytimer); 682 init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 674af673ff96..6f1a6583b17d 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -437,8 +437,10 @@ extern void BChannel_bh(struct BCState *);
437#define B_LL_OK 10 437#define B_LL_OK 10
438 438
439static void 439static void
440isar_bh(struct BCState *bcs) 440isar_bh(struct work_struct *work)
441{ 441{
442 struct BCState *bcs = container_of(work, struct BCState, tqueue);
443
442 BChannel_bh(bcs); 444 BChannel_bh(bcs);
443 if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event)) 445 if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
444 ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR); 446 ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
@@ -1580,7 +1582,7 @@ isar_setup(struct IsdnCardState *cs)
1580 cs->bcs[i].mode = 0; 1582 cs->bcs[i].mode = 0;
1581 cs->bcs[i].hw.isar.dpath = i + 1; 1583 cs->bcs[i].hw.isar.dpath = i + 1;
1582 modeisar(&cs->bcs[i], 0, 0); 1584 modeisar(&cs->bcs[i], 0, 0);
1583 INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]); 1585 INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
1584 } 1586 }
1585} 1587}
1586 1588
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c
index bab356886483..a14204ec88ee 100644
--- a/drivers/isdn/hisax/isdnl1.c
+++ b/drivers/isdn/hisax/isdnl1.c
@@ -315,8 +315,10 @@ BChannel_proc_ack(struct BCState *bcs)
315} 315}
316 316
317void 317void
318BChannel_bh(struct BCState *bcs) 318BChannel_bh(struct work_struct *work)
319{ 319{
320 struct BCState *bcs = container_of(work, struct BCState, tqueue);
321
320 if (!bcs) 322 if (!bcs)
321 return; 323 return;
322 if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) 324 if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
@@ -362,7 +364,7 @@ init_bcstate(struct IsdnCardState *cs, int bc)
362 364
363 bcs->cs = cs; 365 bcs->cs = cs;
364 bcs->channel = bc; 366 bcs->channel = bc;
365 INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs); 367 INIT_WORK(&bcs->tqueue, BChannel_bh);
366 spin_lock_init(&bcs->aclock); 368 spin_lock_init(&bcs->aclock);
367 bcs->BC_SetStack = NULL; 369 bcs->BC_SetStack = NULL;
368 bcs->BC_Close = NULL; 370 bcs->BC_Close = NULL;
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 1655341797a9..3aeceaf9769e 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -101,8 +101,10 @@ W6692_new_ph(struct IsdnCardState *cs)
101} 101}
102 102
103static void 103static void
104W6692_bh(struct IsdnCardState *cs) 104W6692_bh(struct work_struct *work)
105{ 105{
106 struct IsdnCardState *cs =
107 container_of(work, struct IsdnCardState, tqueue);
106 struct PStack *stptr; 108 struct PStack *stptr;
107 109
108 if (!cs) 110 if (!cs)
@@ -1070,7 +1072,7 @@ setup_w6692(struct IsdnCard *card)
1070 id_list[cs->subtyp].card_name, cs->irq, 1072 id_list[cs->subtyp].card_name, cs->irq,
1071 cs->hw.w6692.iobase); 1073 cs->hw.w6692.iobase);
1072 1074
1073 INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs); 1075 INIT_WORK(&cs->tqueue, W6692_bh);
1074 cs->readW6692 = &ReadW6692; 1076 cs->readW6692 = &ReadW6692;
1075 cs->writeW6692 = &WriteW6692; 1077 cs->writeW6692 = &WriteW6692;
1076 cs->readisacfifo = &ReadISACfifo; 1078 cs->readisacfifo = &ReadISACfifo;
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f8d6ae66b41..2e4daebfb7e0 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -984,9 +984,9 @@ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb)
984/* 984/*
985 * called from tq_immediate 985 * called from tq_immediate
986 */ 986 */
987static void isdn_net_softint(void *private) 987static void isdn_net_softint(struct work_struct *work)
988{ 988{
989 isdn_net_local *lp = private; 989 isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
990 struct sk_buff *skb; 990 struct sk_buff *skb;
991 991
992 spin_lock_bh(&lp->xmit_lock); 992 spin_lock_bh(&lp->xmit_lock);
@@ -2596,7 +2596,7 @@ isdn_net_new(char *name, struct net_device *master)
2596 netdev->local->netdev = netdev; 2596 netdev->local->netdev = netdev;
2597 netdev->local->next = netdev->local; 2597 netdev->local->next = netdev->local;
2598 2598
2599 INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local); 2599 INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
2600 spin_lock_init(&netdev->local->xmit_lock); 2600 spin_lock_init(&netdev->local->xmit_lock);
2601 2601
2602 netdev->local->isdn_device = -1; 2602 netdev->local->isdn_device = -1;
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 6ead5e1508b7..1966f3410a13 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -68,8 +68,6 @@ static void pcbit_set_msn(struct pcbit_dev *dev, char *list);
68static int pcbit_check_msn(struct pcbit_dev *dev, char *msn); 68static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
69 69
70 70
71extern void pcbit_deliver(void * data);
72
73int pcbit_init_dev(int board, int mem_base, int irq) 71int pcbit_init_dev(int board, int mem_base, int irq)
74{ 72{
75 struct pcbit_dev *dev; 73 struct pcbit_dev *dev;
@@ -129,7 +127,7 @@ int pcbit_init_dev(int board, int mem_base, int irq)
129 memset(dev->b2, 0, sizeof(struct pcbit_chan)); 127 memset(dev->b2, 0, sizeof(struct pcbit_chan));
130 dev->b2->id = 1; 128 dev->b2->id = 1;
131 129
132 INIT_WORK(&dev->qdelivery, pcbit_deliver, dev); 130 INIT_WORK(&dev->qdelivery, pcbit_deliver);
133 131
134 /* 132 /*
135 * interrupts 133 * interrupts
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index 937fd2120381..0c9f6df873fc 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -67,7 +67,6 @@ extern void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg,
67 * Prototypes 67 * Prototypes
68 */ 68 */
69 69
70void pcbit_deliver(void *data);
71static void pcbit_transmit(struct pcbit_dev *dev); 70static void pcbit_transmit(struct pcbit_dev *dev);
72 71
73static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); 72static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
@@ -299,11 +298,12 @@ pcbit_transmit(struct pcbit_dev *dev)
299 */ 298 */
300 299
301void 300void
302pcbit_deliver(void *data) 301pcbit_deliver(struct work_struct *work)
303{ 302{
304 struct frame_buf *frame; 303 struct frame_buf *frame;
305 unsigned long flags, msg; 304 unsigned long flags, msg;
306 struct pcbit_dev *dev = (struct pcbit_dev *) data; 305 struct pcbit_dev *dev =
306 container_of(work, struct pcbit_dev, qdelivery);
307 307
308 spin_lock_irqsave(&dev->lock, flags); 308 spin_lock_irqsave(&dev->lock, flags);
309 309
diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/isdn/pcbit/pcbit.h
index 388bacefd23a..19c18e88ff16 100644
--- a/drivers/isdn/pcbit/pcbit.h
+++ b/drivers/isdn/pcbit/pcbit.h
@@ -166,4 +166,6 @@ struct pcbit_ioctl {
166#define L2_RUNNING 5 166#define L2_RUNNING 5
167#define L2_ERROR 6 167#define L2_ERROR 6
168 168
169extern void pcbit_deliver(struct work_struct *work);
170
169#endif 171#endif
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index ade25b3fbb35..4871158aca3e 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -600,7 +600,7 @@ core_initcall(smu_late_init);
600 * sysfs visibility 600 * sysfs visibility
601 */ 601 */
602 602
603static void smu_expose_childs(void *unused) 603static void smu_expose_childs(struct work_struct *unused)
604{ 604{
605 struct device_node *np; 605 struct device_node *np;
606 606
@@ -610,7 +610,7 @@ static void smu_expose_childs(void *unused)
610 &smu->of_dev->dev); 610 &smu->of_dev->dev);
611} 611}
612 612
613static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL); 613static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
614 614
615static int smu_platform_probe(struct of_device* dev, 615static int smu_platform_probe(struct of_device* dev,
616 const struct of_device_id *match) 616 const struct of_device_id *match)
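
For statically declared work items that carry no per-instance data, the change is simply dropping the NULL data argument and having the handler ignore its parameter, as smu_expose_childs() does above. A small sketch of that shape with an illustrative foo_rescan handler:

#include <linux/workqueue.h>

static void foo_rescan(struct work_struct *unused)
{
        /* walk global state; nothing to recover from the work item */
}

/* was: static DECLARE_WORK(foo_rescan_work, foo_rescan, NULL); */
static DECLARE_WORK(foo_rescan_work, foo_rescan);

/* call sites are unchanged: schedule_work(&foo_rescan_work); */
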
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08a40f4e4f60..ed2d4ef27fd8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -458,11 +458,11 @@ static void dec_pending(struct crypt_io *io, int error)
458 * interrupt context. 458 * interrupt context.
459 */ 459 */
460static struct workqueue_struct *_kcryptd_workqueue; 460static struct workqueue_struct *_kcryptd_workqueue;
461static void kcryptd_do_work(void *data); 461static void kcryptd_do_work(struct work_struct *work);
462 462
463static void kcryptd_queue_io(struct crypt_io *io) 463static void kcryptd_queue_io(struct crypt_io *io)
464{ 464{
465 INIT_WORK(&io->work, kcryptd_do_work, io); 465 INIT_WORK(&io->work, kcryptd_do_work);
466 queue_work(_kcryptd_workqueue, &io->work); 466 queue_work(_kcryptd_workqueue, &io->work);
467} 467}
468 468
@@ -618,9 +618,9 @@ static void process_read_endio(struct crypt_io *io)
618 dec_pending(io, crypt_convert(cc, &ctx)); 618 dec_pending(io, crypt_convert(cc, &ctx));
619} 619}
620 620
621static void kcryptd_do_work(void *data) 621static void kcryptd_do_work(struct work_struct *work)
622{ 622{
623 struct crypt_io *io = data; 623 struct crypt_io *io = container_of(work, struct crypt_io, work);
624 624
625 if (io->post_process) 625 if (io->post_process)
626 process_read_endio(io); 626 process_read_endio(io);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d754e0bc6e90..e77ee6fd1044 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -104,8 +104,8 @@ typedef int (*action_fn) (struct pgpath *pgpath);
104static kmem_cache_t *_mpio_cache; 104static kmem_cache_t *_mpio_cache;
105 105
106struct workqueue_struct *kmultipathd; 106struct workqueue_struct *kmultipathd;
107static void process_queued_ios(void *data); 107static void process_queued_ios(struct work_struct *work);
108static void trigger_event(void *data); 108static void trigger_event(struct work_struct *work);
109 109
110 110
111/*----------------------------------------------- 111/*-----------------------------------------------
@@ -173,8 +173,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
173 INIT_LIST_HEAD(&m->priority_groups); 173 INIT_LIST_HEAD(&m->priority_groups);
174 spin_lock_init(&m->lock); 174 spin_lock_init(&m->lock);
175 m->queue_io = 1; 175 m->queue_io = 1;
176 INIT_WORK(&m->process_queued_ios, process_queued_ios, m); 176 INIT_WORK(&m->process_queued_ios, process_queued_ios);
177 INIT_WORK(&m->trigger_event, trigger_event, m); 177 INIT_WORK(&m->trigger_event, trigger_event);
178 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 178 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
179 if (!m->mpio_pool) { 179 if (!m->mpio_pool) {
180 kfree(m); 180 kfree(m);
@@ -379,9 +379,10 @@ static void dispatch_queued_ios(struct multipath *m)
379 } 379 }
380} 380}
381 381
382static void process_queued_ios(void *data) 382static void process_queued_ios(struct work_struct *work)
383{ 383{
384 struct multipath *m = (struct multipath *) data; 384 struct multipath *m =
385 container_of(work, struct multipath, process_queued_ios);
385 struct hw_handler *hwh = &m->hw_handler; 386 struct hw_handler *hwh = &m->hw_handler;
386 struct pgpath *pgpath = NULL; 387 struct pgpath *pgpath = NULL;
387 unsigned init_required = 0, must_queue = 1; 388 unsigned init_required = 0, must_queue = 1;
@@ -421,9 +422,10 @@ out:
421 * An event is triggered whenever a path is taken out of use. 422 * An event is triggered whenever a path is taken out of use.
422 * Includes path failure and PG bypass. 423 * Includes path failure and PG bypass.
423 */ 424 */
424static void trigger_event(void *data) 425static void trigger_event(struct work_struct *work)
425{ 426{
426 struct multipath *m = (struct multipath *) data; 427 struct multipath *m =
428 container_of(work, struct multipath, trigger_event);
427 429
428 dm_table_event(m->ti->table); 430 dm_table_event(m->ti->table);
429} 431}
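
A structure may embed several work items, as struct multipath does with process_queued_ios and trigger_event above; each handler then names its own member in container_of(), which is why no shared data pointer is needed. Illustrative sketch (bar_dev and its members are invented for the example):

#include <linux/workqueue.h>

struct bar_dev {
        unsigned int ios_dispatched;
        unsigned int events_seen;
        struct work_struct io_work;
        struct work_struct event_work;
};

static void bar_do_ios(struct work_struct *work)
{
        struct bar_dev *d = container_of(work, struct bar_dev, io_work);

        d->ios_dispatched++;
}

static void bar_do_event(struct work_struct *work)
{
        struct bar_dev *d = container_of(work, struct bar_dev, event_work);

        d->events_seen++;
}

static void bar_init(struct bar_dev *d)
{
        INIT_WORK(&d->io_work, bar_do_ios);
        INIT_WORK(&d->event_work, bar_do_event);
}
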
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 48a653b3f518..fc8cbb168e3e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -883,7 +883,7 @@ static void do_mirror(struct mirror_set *ms)
883 do_writes(ms, &writes); 883 do_writes(ms, &writes);
884} 884}
885 885
886static void do_work(void *ignored) 886static void do_work(struct work_struct *ignored)
887{ 887{
888 struct mirror_set *ms; 888 struct mirror_set *ms;
889 889
@@ -1269,7 +1269,7 @@ static int __init dm_mirror_init(void)
1269 dm_dirty_log_exit(); 1269 dm_dirty_log_exit();
1270 return r; 1270 return r;
1271 } 1271 }
1272 INIT_WORK(&_kmirrord_work, do_work, NULL); 1272 INIT_WORK(&_kmirrord_work, do_work);
1273 1273
1274 r = dm_register_target(&mirror_target); 1274 r = dm_register_target(&mirror_target);
1275 if (r < 0) { 1275 if (r < 0) {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5281e0094072..91c7aa1fed0e 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,7 +40,7 @@
40#define SNAPSHOT_PAGES 256 40#define SNAPSHOT_PAGES 256
41 41
42struct workqueue_struct *ksnapd; 42struct workqueue_struct *ksnapd;
43static void flush_queued_bios(void *data); 43static void flush_queued_bios(struct work_struct *work);
44 44
45struct pending_exception { 45struct pending_exception {
46 struct exception e; 46 struct exception e;
@@ -528,7 +528,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
528 } 528 }
529 529
530 bio_list_init(&s->queued_bios); 530 bio_list_init(&s->queued_bios);
531 INIT_WORK(&s->queued_bios_work, flush_queued_bios, s); 531 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
532 532
533 /* Add snapshot to the list of snapshots for this origin */ 533 /* Add snapshot to the list of snapshots for this origin */
534 /* Exceptions aren't triggered till snapshot_resume() is called */ 534 /* Exceptions aren't triggered till snapshot_resume() is called */
@@ -603,9 +603,10 @@ static void flush_bios(struct bio *bio)
603 } 603 }
604} 604}
605 605
606static void flush_queued_bios(void *data) 606static void flush_queued_bios(struct work_struct *work)
607{ 607{
608 struct dm_snapshot *s = (struct dm_snapshot *) data; 608 struct dm_snapshot *s =
609 container_of(work, struct dm_snapshot, queued_bios_work);
609 struct bio *queued_bios; 610 struct bio *queued_bios;
610 unsigned long flags; 611 unsigned long flags;
611 612
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index f1db6eff4857..b3c01496c737 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -417,7 +417,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
417/* 417/*
418 * kcopyd does this every time it's woken up. 418 * kcopyd does this every time it's woken up.
419 */ 419 */
420static void do_work(void *ignored) 420static void do_work(struct work_struct *ignored)
421{ 421{
422 /* 422 /*
423 * The order that these are called is *very* important. 423 * The order that these are called is *very* important.
@@ -628,7 +628,7 @@ static int kcopyd_init(void)
628 } 628 }
629 629
630 kcopyd_clients++; 630 kcopyd_clients++;
631 INIT_WORK(&_kcopyd_work, do_work, NULL); 631 INIT_WORK(&_kcopyd_work, do_work);
632 mutex_unlock(&kcopyd_init_lock); 632 mutex_unlock(&kcopyd_init_lock);
633 return 0; 633 return 0;
634} 634}
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
index 06893243f3d4..6e166801505d 100644
--- a/drivers/media/dvb/b2c2/flexcop-pci.c
+++ b/drivers/media/dvb/b2c2/flexcop-pci.c
@@ -63,7 +63,7 @@ struct flexcop_pci {
63 63
64 unsigned long last_irq; 64 unsigned long last_irq;
65 65
66 struct work_struct irq_check_work; 66 struct delayed_work irq_check_work;
67 67
68 struct flexcop_device *fc_dev; 68 struct flexcop_device *fc_dev;
69}; 69};
@@ -97,9 +97,10 @@ static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc, flexcop_ibi_regi
97 return 0; 97 return 0;
98} 98}
99 99
100static void flexcop_pci_irq_check_work(void *data) 100static void flexcop_pci_irq_check_work(struct work_struct *work)
101{ 101{
102 struct flexcop_pci *fc_pci = data; 102 struct flexcop_pci *fc_pci =
103 container_of(work, struct flexcop_pci, irq_check_work.work);
103 struct flexcop_device *fc = fc_pci->fc_dev; 104 struct flexcop_device *fc = fc_pci->fc_dev;
104 105
105 flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); 106 flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
@@ -371,7 +372,7 @@ static int flexcop_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
371 if ((ret = flexcop_pci_dma_init(fc_pci)) != 0) 372 if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
372 goto err_fc_exit; 373 goto err_fc_exit;
373 374
374 INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci); 375 INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
375 376
376 return ret; 377 return ret;
377 378
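
Pollers that rearm themselves with schedule_delayed_work() move from struct work_struct to struct delayed_work, the handler steps through the embedded .work member in container_of(), and initialization switches to INIT_DELAYED_WORK(). A minimal sketch of that variant (baz_dev is hypothetical; the 100 ms poll interval is only for illustration):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct baz_dev {
        int status;
        struct delayed_work poll_work;  /* was: struct work_struct */
};

static void baz_poll(struct work_struct *work)
{
        /* delayed_work embeds a work_struct named "work", hence ".work" */
        struct baz_dev *dev = container_of(work, struct baz_dev,
                                           poll_work.work);

        dev->status = 0;                /* read hardware, update state ... */
        schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(100));
}

static void baz_start(struct baz_dev *dev)
{
        /* was: INIT_WORK(&dev->poll_work, baz_poll, dev); */
        INIT_DELAYED_WORK(&dev->poll_work, baz_poll);
        schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(100));
}
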
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index 55bc891768c2..8458ff3f351e 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -127,7 +127,7 @@ struct cinergyt2 {
127 127
128 struct dvbt_set_parameters_msg param; 128 struct dvbt_set_parameters_msg param;
129 struct dvbt_get_status_msg status; 129 struct dvbt_get_status_msg status;
130 struct work_struct query_work; 130 struct delayed_work query_work;
131 131
132 wait_queue_head_t poll_wq; 132 wait_queue_head_t poll_wq;
133 int pending_fe_events; 133 int pending_fe_events;
@@ -141,7 +141,7 @@ struct cinergyt2 {
141#ifdef ENABLE_RC 141#ifdef ENABLE_RC
142 struct input_dev *rc_input_dev; 142 struct input_dev *rc_input_dev;
143 char phys[64]; 143 char phys[64];
144 struct work_struct rc_query_work; 144 struct delayed_work rc_query_work;
145 int rc_input_event; 145 int rc_input_event;
146 u32 rc_last_code; 146 u32 rc_last_code;
147 unsigned long last_event_jiffies; 147 unsigned long last_event_jiffies;
@@ -722,9 +722,10 @@ static struct dvb_device cinergyt2_fe_template = {
722 722
723#ifdef ENABLE_RC 723#ifdef ENABLE_RC
724 724
725static void cinergyt2_query_rc (void *data) 725static void cinergyt2_query_rc (struct work_struct *work)
726{ 726{
727 struct cinergyt2 *cinergyt2 = data; 727 struct cinergyt2 *cinergyt2 =
728 container_of(work, struct cinergyt2, rc_query_work.work);
728 char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS }; 729 char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
729 struct cinergyt2_rc_event rc_events[12]; 730 struct cinergyt2_rc_event rc_events[12];
730 int n, len, i; 731 int n, len, i;
@@ -805,7 +806,7 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2)
805 strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys)); 806 strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
806 cinergyt2->rc_input_event = KEY_MAX; 807 cinergyt2->rc_input_event = KEY_MAX;
807 cinergyt2->rc_last_code = ~0; 808 cinergyt2->rc_last_code = ~0;
808 INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2); 809 INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
809 810
810 input_dev->name = DRIVER_NAME " remote control"; 811 input_dev->name = DRIVER_NAME " remote control";
811 input_dev->phys = cinergyt2->phys; 812 input_dev->phys = cinergyt2->phys;
@@ -846,9 +847,10 @@ static inline void cinergyt2_resume_rc(struct cinergyt2 *cinergyt2) { }
846 847
847#endif /* ENABLE_RC */ 848#endif /* ENABLE_RC */
848 849
849static void cinergyt2_query (void *data) 850static void cinergyt2_query (struct work_struct *work)
850{ 851{
851 struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data; 852 struct cinergyt2 *cinergyt2 =
853 container_of(work, struct cinergyt2, query_work.work);
852 char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS }; 854 char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
853 struct dvbt_get_status_msg *s = &cinergyt2->status; 855 struct dvbt_get_status_msg *s = &cinergyt2->status;
854 uint8_t lock_bits; 856 uint8_t lock_bits;
@@ -892,7 +894,7 @@ static int cinergyt2_probe (struct usb_interface *intf,
892 894
893 mutex_init(&cinergyt2->sem); 895 mutex_init(&cinergyt2->sem);
894 init_waitqueue_head (&cinergyt2->poll_wq); 896 init_waitqueue_head (&cinergyt2->poll_wq);
895 INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2); 897 INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
896 898
897 cinergyt2->udev = interface_to_usbdev(intf); 899 cinergyt2->udev = interface_to_usbdev(intf);
898 cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; 900 cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8859ab74f0fe..ebf4dc5190f6 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -127,6 +127,7 @@ struct dvb_net_priv {
127 int in_use; 127 int in_use;
128 struct net_device_stats stats; 128 struct net_device_stats stats;
129 u16 pid; 129 u16 pid;
130 struct net_device *net;
130 struct dvb_net *host; 131 struct dvb_net *host;
131 struct dmx_demux *demux; 132 struct dmx_demux *demux;
132 struct dmx_section_feed *secfeed; 133 struct dmx_section_feed *secfeed;
@@ -1123,10 +1124,11 @@ static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc)
1123} 1124}
1124 1125
1125 1126
1126static void wq_set_multicast_list (void *data) 1127static void wq_set_multicast_list (struct work_struct *work)
1127{ 1128{
1128 struct net_device *dev = data; 1129 struct dvb_net_priv *priv =
1129 struct dvb_net_priv *priv = dev->priv; 1130 container_of(work, struct dvb_net_priv, set_multicast_list_wq);
1131 struct net_device *dev = priv->net;
1130 1132
1131 dvb_net_feed_stop(dev); 1133 dvb_net_feed_stop(dev);
1132 priv->rx_mode = RX_MODE_UNI; 1134 priv->rx_mode = RX_MODE_UNI;
@@ -1167,9 +1169,11 @@ static void dvb_net_set_multicast_list (struct net_device *dev)
1167} 1169}
1168 1170
1169 1171
1170static void wq_restart_net_feed (void *data) 1172static void wq_restart_net_feed (struct work_struct *work)
1171{ 1173{
1172 struct net_device *dev = data; 1174 struct dvb_net_priv *priv =
1175 container_of(work, struct dvb_net_priv, restart_net_feed_wq);
1176 struct net_device *dev = priv->net;
1173 1177
1174 if (netif_running(dev)) { 1178 if (netif_running(dev)) {
1175 dvb_net_feed_stop(dev); 1179 dvb_net_feed_stop(dev);
@@ -1276,6 +1280,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
1276 dvbnet->device[if_num] = net; 1280 dvbnet->device[if_num] = net;
1277 1281
1278 priv = net->priv; 1282 priv = net->priv;
1283 priv->net = net;
1279 priv->demux = dvbnet->demux; 1284 priv->demux = dvbnet->demux;
1280 priv->pid = pid; 1285 priv->pid = pid;
1281 priv->rx_mode = RX_MODE_UNI; 1286 priv->rx_mode = RX_MODE_UNI;
@@ -1284,8 +1289,8 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
1284 priv->feedtype = feedtype; 1289 priv->feedtype = feedtype;
1285 reset_ule(priv); 1290 reset_ule(priv);
1286 1291
1287 INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net); 1292 INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
1288 INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net); 1293 INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
1289 mutex_init(&priv->mutex); 1294 mutex_init(&priv->mutex);
1290 1295
1291 net->base_addr = pid; 1296 net->base_addr = pid;
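
Where the old code passed something other than the containing structure (here the net_device rather than the dvb_net_priv), the conversion adds a back-pointer so the handler can still reach it after container_of(); that is what the new priv->net field above is for. A sketch of the idea with illustrative names:

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct qux_priv {
        struct net_device *dev;         /* back-pointer added for the handler */
        struct work_struct restart_work;
};

static void qux_restart(struct work_struct *work)
{
        struct qux_priv *priv = container_of(work, struct qux_priv,
                                             restart_work);
        struct net_device *dev = priv->dev;     /* was the work data pointer */

        if (netif_running(dev))
                netif_wake_queue(dev);
}

static void qux_setup(struct net_device *dev, struct qux_priv *priv)
{
        priv->dev = dev;                /* record the back-pointer once */
        INIT_WORK(&priv->restart_work, qux_restart);
}
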
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 0a3a0b6c2350..794e4471561c 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -13,9 +13,10 @@
13 * 13 *
14 * TODO: Fix the repeat rate of the input device. 14 * TODO: Fix the repeat rate of the input device.
15 */ 15 */
16static void dvb_usb_read_remote_control(void *data) 16static void dvb_usb_read_remote_control(struct work_struct *work)
17{ 17{
18 struct dvb_usb_device *d = data; 18 struct dvb_usb_device *d =
19 container_of(work, struct dvb_usb_device, rc_query_work.work);
19 u32 event; 20 u32 event;
20 int state; 21 int state;
21 22
@@ -128,7 +129,7 @@ int dvb_usb_remote_init(struct dvb_usb_device *d)
128 129
129 input_register_device(d->rc_input_dev); 130 input_register_device(d->rc_input_dev);
130 131
131 INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d); 132 INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
132 133
133 info("schedule remote query interval to %d msecs.", d->props.rc_interval); 134 info("schedule remote query interval to %d msecs.", d->props.rc_interval);
134 schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval)); 135 schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 376c45a8e779..0d721731a524 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -369,7 +369,7 @@ struct dvb_usb_device {
369 /* remote control */ 369 /* remote control */
370 struct input_dev *rc_input_dev; 370 struct input_dev *rc_input_dev;
371 char rc_phys[64]; 371 char rc_phys[64];
372 struct work_struct rc_query_work; 372 struct delayed_work rc_query_work;
373 u32 last_event; 373 u32 last_event;
374 int last_state; 374 int last_state;
375 375
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index 41f4b8d17559..b12cec94f4cc 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -82,6 +82,8 @@ struct pp_cam_entry {
82 struct pardevice *pdev; 82 struct pardevice *pdev;
83 struct parport *port; 83 struct parport *port;
84 struct work_struct cb_task; 84 struct work_struct cb_task;
85 void (*cb_func)(void *cbdata);
86 void *cb_data;
85 int open_count; 87 int open_count;
86 wait_queue_head_t wq_stream; 88 wait_queue_head_t wq_stream;
87 /* image state flags */ 89 /* image state flags */
@@ -130,6 +132,20 @@ static void cpia_parport_disable_irq( struct parport *port ) {
130#define PARPORT_CHUNK_SIZE PAGE_SIZE 132#define PARPORT_CHUNK_SIZE PAGE_SIZE
131 133
132 134
135static void cpia_pp_run_callback(struct work_struct *work)
136{
137 void (*cb_func)(void *cbdata);
138 void *cb_data;
139 struct pp_cam_entry *cam;
140
141 cam = container_of(work, struct pp_cam_entry, cb_task);
142 cb_func = cam->cb_func;
143 cb_data = cam->cb_data;
144 work_release(work);
145
146 cb_func(cb_data);
147}
148
133/**************************************************************************** 149/****************************************************************************
134 * 150 *
135 * CPiA-specific low-level parport functions for nibble uploads 151 * CPiA-specific low-level parport functions for nibble uploads
@@ -664,7 +680,9 @@ static int cpia_pp_registerCallback(void *privdata, void (*cb)(void *cbdata), vo
664 int retval = 0; 680 int retval = 0;
665 681
666 if(cam->port->irq != PARPORT_IRQ_NONE) { 682 if(cam->port->irq != PARPORT_IRQ_NONE) {
667 INIT_WORK(&cam->cb_task, cb, cbdata); 683 cam->cb_func = cb;
684 cam->cb_data = cbdata;
685 INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback);
668 } else { 686 } else {
669 retval = -1; 687 retval = -1;
670 } 688 }
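
When the callback and its argument come from outside the driver, there is no structure to pull the argument out of with container_of(), so the patch stores both next to the work item and runs them from a small trampoline; the INIT_WORK_NAR()/work_release() pair used above is the non-auto-release variant, which leaves the item marked pending until the handler releases it. A hedged sketch of the same trampoline shape (cb_holder and its names are invented, not from the patch):

#include <linux/workqueue.h>

struct cb_holder {
        void (*cb_func)(void *cbdata);
        void *cb_data;
        struct work_struct cb_task;
};

static void cb_trampoline(struct work_struct *work)
{
        struct cb_holder *h = container_of(work, struct cb_holder, cb_task);
        void (*cb_func)(void *) = h->cb_func;
        void *cb_data = h->cb_data;

        /* NAR = no auto-release: the handler must call work_release()
         * itself before the item can be queued again, mirroring
         * cpia_pp_run_callback() above. */
        work_release(work);

        cb_func(cb_data);
}

static void cb_register(struct cb_holder *h, void (*cb)(void *), void *data)
{
        h->cb_func = cb;
        h->cb_data = data;
        INIT_WORK_NAR(&h->cb_task, cb_trampoline);
}
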
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 57e1c024a547..e60a0a52e4b2 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -145,9 +145,9 @@ static void ir_timer(unsigned long data)
145 schedule_work(&ir->work); 145 schedule_work(&ir->work);
146} 146}
147 147
148static void cx88_ir_work(void *data) 148static void cx88_ir_work(struct work_struct *work)
149{ 149{
150 struct cx88_IR *ir = data; 150 struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
151 unsigned long timeout; 151 unsigned long timeout;
152 152
153 cx88_ir_handle_key(ir); 153 cx88_ir_handle_key(ir);
@@ -308,7 +308,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
308 core->ir = ir; 308 core->ir = ir;
309 309
310 if (ir->polling) { 310 if (ir->polling) {
311 INIT_WORK(&ir->work, cx88_ir_work, ir); 311 INIT_WORK(&ir->work, cx88_ir_work);
312 init_timer(&ir->timer); 312 init_timer(&ir->timer);
313 ir->timer.function = ir_timer; 313 ir->timer.function = ir_timer;
314 ir->timer.data = (unsigned long)ir; 314 ir->timer.data = (unsigned long)ir;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 1457b1602221..ab87e7bfe84f 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -268,9 +268,9 @@ static void ir_timer(unsigned long data)
268 schedule_work(&ir->work); 268 schedule_work(&ir->work);
269} 269}
270 270
271static void ir_work(void *data) 271static void ir_work(struct work_struct *work)
272{ 272{
273 struct IR_i2c *ir = data; 273 struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
274 ir_key_poll(ir); 274 ir_key_poll(ir);
275 mod_timer(&ir->timer, jiffies+HZ/10); 275 mod_timer(&ir->timer, jiffies+HZ/10);
276} 276}
@@ -400,7 +400,7 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
400 ir->input->name,ir->input->phys,adap->name); 400 ir->input->name,ir->input->phys,adap->name);
401 401
402 /* start polling via eventd */ 402 /* start polling via eventd */
403 INIT_WORK(&ir->work, ir_work, ir); 403 INIT_WORK(&ir->work, ir_work);
404 init_timer(&ir->timer); 404 init_timer(&ir->timer);
405 ir->timer.function = ir_timer; 405 ir->timer.function = ir_timer;
406 ir->timer.data = (unsigned long)ir; 406 ir->timer.data = (unsigned long)ir;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
index f129f316d20e..cf129746205d 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -45,16 +45,21 @@ static void pvr2_context_trigger_poll(struct pvr2_context *mp)
45} 45}
46 46
47 47
48static void pvr2_context_poll(struct pvr2_context *mp) 48static void pvr2_context_poll(struct work_struct *work)
49{ 49{
50 struct pvr2_context *mp =
51 container_of(work, struct pvr2_context, workpoll);
50 pvr2_context_enter(mp); do { 52 pvr2_context_enter(mp); do {
51 pvr2_hdw_poll(mp->hdw); 53 pvr2_hdw_poll(mp->hdw);
52 } while (0); pvr2_context_exit(mp); 54 } while (0); pvr2_context_exit(mp);
53} 55}
54 56
55 57
56static void pvr2_context_setup(struct pvr2_context *mp) 58static void pvr2_context_setup(struct work_struct *work)
57{ 59{
60 struct pvr2_context *mp =
61 container_of(work, struct pvr2_context, workinit);
62
58 pvr2_context_enter(mp); do { 63 pvr2_context_enter(mp); do {
59 if (!pvr2_hdw_dev_ok(mp->hdw)) break; 64 if (!pvr2_hdw_dev_ok(mp->hdw)) break;
60 pvr2_hdw_setup(mp->hdw); 65 pvr2_hdw_setup(mp->hdw);
@@ -92,8 +97,8 @@ struct pvr2_context *pvr2_context_create(
92 } 97 }
93 98
94 mp->workqueue = create_singlethread_workqueue("pvrusb2"); 99 mp->workqueue = create_singlethread_workqueue("pvrusb2");
95 INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp); 100 INIT_WORK(&mp->workinit, pvr2_context_setup);
96 INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp); 101 INIT_WORK(&mp->workpoll, pvr2_context_poll);
97 queue_work(mp->workqueue,&mp->workinit); 102 queue_work(mp->workqueue,&mp->workinit);
98 done: 103 done:
99 return mp; 104 return mp;
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 7b9859c33018..92eabf88a09b 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -324,9 +324,9 @@ static void saa6588_timer(unsigned long data)
324 schedule_work(&s->work); 324 schedule_work(&s->work);
325} 325}
326 326
327static void saa6588_work(void *data) 327static void saa6588_work(struct work_struct *work)
328{ 328{
329 struct saa6588 *s = (struct saa6588 *)data; 329 struct saa6588 *s = container_of(work, struct saa6588, work);
330 330
331 saa6588_i2c_poll(s); 331 saa6588_i2c_poll(s);
332 mod_timer(&s->timer, jiffies + msecs_to_jiffies(20)); 332 mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
@@ -419,7 +419,7 @@ static int saa6588_attach(struct i2c_adapter *adap, int addr, int kind)
419 saa6588_configure(s); 419 saa6588_configure(s);
420 420
421 /* start polling via eventd */ 421 /* start polling via eventd */
422 INIT_WORK(&s->work, saa6588_work, s); 422 INIT_WORK(&s->work, saa6588_work);
423 init_timer(&s->timer); 423 init_timer(&s->timer);
424 s->timer.function = saa6588_timer; 424 s->timer.function = saa6588_timer;
425 s->timer.data = (unsigned long)s; 425 s->timer.data = (unsigned long)s;
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 65d044086ce9..daaae870a2c4 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -343,9 +343,10 @@ static struct video_device saa7134_empress_template =
343 .minor = -1, 343 .minor = -1,
344}; 344};
345 345
346static void empress_signal_update(void* data) 346static void empress_signal_update(struct work_struct *work)
347{ 347{
348 struct saa7134_dev* dev = (struct saa7134_dev*) data; 348 struct saa7134_dev* dev =
349 container_of(work, struct saa7134_dev, empress_workqueue);
349 350
350 if (dev->nosignal) { 351 if (dev->nosignal) {
351 dprintk("no video signal\n"); 352 dprintk("no video signal\n");
@@ -378,7 +379,7 @@ static int empress_init(struct saa7134_dev *dev)
378 "%s empress (%s)", dev->name, 379 "%s empress (%s)", dev->name,
379 saa7134_boards[dev->board].name); 380 saa7134_boards[dev->board].name);
380 381
381 INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev); 382 INIT_WORK(&dev->empress_workqueue, empress_signal_update);
382 383
383 err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, 384 err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
384 empress_nr[dev->nr]); 385 empress_nr[dev->nr]);
@@ -399,7 +400,7 @@ static int empress_init(struct saa7134_dev *dev)
399 sizeof(struct saa7134_buf), 400 sizeof(struct saa7134_buf),
400 dev); 401 dev);
401 402
402 empress_signal_update(dev); 403 empress_signal_update(&dev->empress_workqueue);
403 return 0; 404 return 0;
404} 405}
405 406
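
One knock-on effect visible above: code that used to invoke the handler directly with the private pointer now has to hand it the address of the embedded work item instead, as in empress_signal_update(&dev->empress_workqueue). A tiny illustrative sketch of a synchronous call site after conversion:

#include <linux/workqueue.h>

struct foo_dev {
        int signal_ok;
        struct work_struct update_work;
};

static void foo_update(struct work_struct *work)
{
        struct foo_dev *dev = container_of(work, struct foo_dev, update_work);

        dev->signal_ok = 1;             /* re-check the signal ... */
}

static int foo_init(struct foo_dev *dev)
{
        INIT_WORK(&dev->update_work, foo_update);

        /* direct (synchronous) call now passes the work item, not dev: */
        foo_update(&dev->update_work);
        return 0;
}
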
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 1dd491773150..ef2b55e19910 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1018,9 +1018,10 @@ mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
1018} 1018}
1019 1019
1020static void 1020static void
1021mptfc_setup_reset(void *arg) 1021mptfc_setup_reset(struct work_struct *work)
1022{ 1022{
1023 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 1023 MPT_ADAPTER *ioc =
1024 container_of(work, MPT_ADAPTER, fc_setup_reset_work);
1024 u64 pn; 1025 u64 pn;
1025 struct mptfc_rport_info *ri; 1026 struct mptfc_rport_info *ri;
1026 1027
@@ -1043,9 +1044,10 @@ mptfc_setup_reset(void *arg)
1043} 1044}
1044 1045
1045static void 1046static void
1046mptfc_rescan_devices(void *arg) 1047mptfc_rescan_devices(struct work_struct *work)
1047{ 1048{
1048 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 1049 MPT_ADAPTER *ioc =
1050 container_of(work, MPT_ADAPTER, fc_rescan_work);
1049 int ii; 1051 int ii;
1050 u64 pn; 1052 u64 pn;
1051 struct mptfc_rport_info *ri; 1053 struct mptfc_rport_info *ri;
@@ -1154,8 +1156,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1154 } 1156 }
1155 1157
1156 spin_lock_init(&ioc->fc_rescan_work_lock); 1158 spin_lock_init(&ioc->fc_rescan_work_lock);
1157 INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc); 1159 INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
1158 INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc); 1160 INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
1159 1161
1160 spin_lock_irqsave(&ioc->FreeQlock, flags); 1162 spin_lock_irqsave(&ioc->FreeQlock, flags);
1161 1163
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 314c3a27585d..b7c4407c5e3f 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -111,7 +111,8 @@ struct mpt_lan_priv {
111 u32 total_received; 111 u32 total_received;
112 struct net_device_stats stats; /* Per device statistics */ 112 struct net_device_stats stats; /* Per device statistics */
113 113
114 struct work_struct post_buckets_task; 114 struct delayed_work post_buckets_task;
115 struct net_device *dev;
115 unsigned long post_buckets_active; 116 unsigned long post_buckets_active;
116}; 117};
117 118
@@ -132,7 +133,7 @@ static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
132static int mpt_lan_open(struct net_device *dev); 133static int mpt_lan_open(struct net_device *dev);
133static int mpt_lan_reset(struct net_device *dev); 134static int mpt_lan_reset(struct net_device *dev);
134static int mpt_lan_close(struct net_device *dev); 135static int mpt_lan_close(struct net_device *dev);
135static void mpt_lan_post_receive_buckets(void *dev_id); 136static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
136static void mpt_lan_wake_post_buckets_task(struct net_device *dev, 137static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
137 int priority); 138 int priority);
138static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); 139static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
@@ -345,7 +346,7 @@ mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
345 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; 346 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
346 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); 347 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
347 } else { 348 } else {
348 mpt_lan_post_receive_buckets(dev); 349 mpt_lan_post_receive_buckets(priv);
349 netif_wake_queue(dev); 350 netif_wake_queue(dev);
350 } 351 }
351 352
@@ -441,7 +442,7 @@ mpt_lan_open(struct net_device *dev)
441 442
442 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); 443 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
443 444
444 mpt_lan_post_receive_buckets(dev); 445 mpt_lan_post_receive_buckets(priv);
445 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", 446 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
446 IOC_AND_NETDEV_NAMES_s_s(dev)); 447 IOC_AND_NETDEV_NAMES_s_s(dev));
447 448
@@ -854,7 +855,7 @@ mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
854 855
855 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { 856 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
856 if (priority) { 857 if (priority) {
857 schedule_work(&priv->post_buckets_task); 858 schedule_delayed_work(&priv->post_buckets_task, 0);
858 } else { 859 } else {
859 schedule_delayed_work(&priv->post_buckets_task, 1); 860 schedule_delayed_work(&priv->post_buckets_task, 1);
860 dioprintk((KERN_INFO MYNAM ": post_buckets queued on " 861 dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
@@ -1188,10 +1189,9 @@ mpt_lan_receive_post_reply(struct net_device *dev,
1188/* Simple SGE's only at the moment */ 1189/* Simple SGE's only at the moment */
1189 1190
1190static void 1191static void
1191mpt_lan_post_receive_buckets(void *dev_id) 1192mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1192{ 1193{
1193 struct net_device *dev = dev_id; 1194 struct net_device *dev = priv->dev;
1194 struct mpt_lan_priv *priv = dev->priv;
1195 MPT_ADAPTER *mpt_dev = priv->mpt_dev; 1195 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1196 MPT_FRAME_HDR *mf; 1196 MPT_FRAME_HDR *mf;
1197 LANReceivePostRequest_t *pRecvReq; 1197 LANReceivePostRequest_t *pRecvReq;
@@ -1335,6 +1335,13 @@ out:
1335 clear_bit(0, &priv->post_buckets_active); 1335 clear_bit(0, &priv->post_buckets_active);
1336} 1336}
1337 1337
1338static void
1339mpt_lan_post_receive_buckets_work(struct work_struct *work)
1340{
1341 mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1342 post_buckets_task.work));
1343}
1344
1338/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1345/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1339static struct net_device * 1346static struct net_device *
1340mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) 1347mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1350,11 +1357,13 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1350 1357
1351 priv = netdev_priv(dev); 1358 priv = netdev_priv(dev);
1352 1359
1360 priv->dev = dev;
1353 priv->mpt_dev = mpt_dev; 1361 priv->mpt_dev = mpt_dev;
1354 priv->pnum = pnum; 1362 priv->pnum = pnum;
1355 1363
1356 memset(&priv->post_buckets_task, 0, sizeof(struct work_struct)); 1364 memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
1357 INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev); 1365 INIT_DELAYED_WORK(&priv->post_buckets_task,
1366 mpt_lan_post_receive_buckets_work);
1358 priv->post_buckets_active = 0; 1367 priv->post_buckets_active = 0;
1359 1368
1360 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", 1369 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
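
Where the old handler is also called directly from other paths, as mpt_lan_post_receive_buckets() is above, the patch keeps its natural argument and adds a thin wrapper that only does the container_of(); note too that once the item is a delayed_work, immediate submission becomes schedule_delayed_work(..., 0). A sketch of that wrapper arrangement with illustrative names:

#include <linux/workqueue.h>

struct lan_priv {
        unsigned long buckets_out;
        struct delayed_work post_task;
};

/* Keeps its convenient argument; callable from other code paths too. */
static void lan_post_buckets(struct lan_priv *priv)
{
        priv->buckets_out++;            /* refill receive buckets ... */
}

/* Thin adapter with the work_func_t signature. */
static void lan_post_buckets_work(struct work_struct *work)
{
        lan_post_buckets(container_of(work, struct lan_priv,
                                      post_task.work));
}

static void lan_init(struct lan_priv *priv)
{
        INIT_DELAYED_WORK(&priv->post_task, lan_post_buckets_work);

        /* "run now" on a delayed_work is a zero-delay submission: */
        schedule_delayed_work(&priv->post_task, 0);
}
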
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b752a479f6db..4f0c530e47b0 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2006,9 +2006,10 @@ __mptsas_discovery_work(MPT_ADAPTER *ioc)
2006 *(Mutex LOCKED) 2006 *(Mutex LOCKED)
2007 */ 2007 */
2008static void 2008static void
2009mptsas_discovery_work(void * arg) 2009mptsas_discovery_work(struct work_struct *work)
2010{ 2010{
2011 struct mptsas_discovery_event *ev = arg; 2011 struct mptsas_discovery_event *ev =
2012 container_of(work, struct mptsas_discovery_event, work);
2012 MPT_ADAPTER *ioc = ev->ioc; 2013 MPT_ADAPTER *ioc = ev->ioc;
2013 2014
2014 mutex_lock(&ioc->sas_discovery_mutex); 2015 mutex_lock(&ioc->sas_discovery_mutex);
@@ -2068,9 +2069,9 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id)
2068 * Work queue thread to clear the persitency table 2069 * Work queue thread to clear the persitency table
2069 */ 2070 */
2070static void 2071static void
2071mptsas_persist_clear_table(void * arg) 2072mptsas_persist_clear_table(struct work_struct *work)
2072{ 2073{
2073 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 2074 MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
2074 2075
2075 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT); 2076 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
2076} 2077}
@@ -2093,9 +2094,10 @@ mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
2093 * Work queue thread to handle SAS hotplug events 2094 * Work queue thread to handle SAS hotplug events
2094 */ 2095 */
2095static void 2096static void
2096mptsas_hotplug_work(void *arg) 2097mptsas_hotplug_work(struct work_struct *work)
2097{ 2098{
2098 struct mptsas_hotplug_event *ev = arg; 2099 struct mptsas_hotplug_event *ev =
2100 container_of(work, struct mptsas_hotplug_event, work);
2099 MPT_ADAPTER *ioc = ev->ioc; 2101 MPT_ADAPTER *ioc = ev->ioc;
2100 struct mptsas_phyinfo *phy_info; 2102 struct mptsas_phyinfo *phy_info;
2101 struct sas_rphy *rphy; 2103 struct sas_rphy *rphy;
@@ -2341,7 +2343,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc,
2341 break; 2343 break;
2342 } 2344 }
2343 2345
2344 INIT_WORK(&ev->work, mptsas_hotplug_work, ev); 2346 INIT_WORK(&ev->work, mptsas_hotplug_work);
2345 ev->ioc = ioc; 2347 ev->ioc = ioc;
2346 ev->handle = le16_to_cpu(sas_event_data->DevHandle); 2348 ev->handle = le16_to_cpu(sas_event_data->DevHandle);
2347 ev->parent_handle = 2349 ev->parent_handle =
@@ -2366,7 +2368,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc,
2366 * Persistent table is full. 2368 * Persistent table is full.
2367 */ 2369 */
2368 INIT_WORK(&ioc->sas_persist_task, 2370 INIT_WORK(&ioc->sas_persist_task,
2369 mptsas_persist_clear_table, (void *)ioc); 2371 mptsas_persist_clear_table);
2370 schedule_work(&ioc->sas_persist_task); 2372 schedule_work(&ioc->sas_persist_task);
2371 break; 2373 break;
2372 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 2374 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
@@ -2395,7 +2397,7 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
2395 return; 2397 return;
2396 } 2398 }
2397 2399
2398 INIT_WORK(&ev->work, mptsas_hotplug_work, ev); 2400 INIT_WORK(&ev->work, mptsas_hotplug_work);
2399 ev->ioc = ioc; 2401 ev->ioc = ioc;
2400 ev->id = raid_event_data->VolumeID; 2402 ev->id = raid_event_data->VolumeID;
2401 ev->event_type = MPTSAS_IGNORE_EVENT; 2403 ev->event_type = MPTSAS_IGNORE_EVENT;
@@ -2474,7 +2476,7 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
2474 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 2476 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2475 if (!ev) 2477 if (!ev)
2476 return; 2478 return;
2477 INIT_WORK(&ev->work, mptsas_discovery_work, ev); 2479 INIT_WORK(&ev->work, mptsas_discovery_work);
2478 ev->ioc = ioc; 2480 ev->ioc = ioc;
2479 schedule_work(&ev->work); 2481 schedule_work(&ev->work);
2480}; 2482};
@@ -2511,8 +2513,7 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
2511 break; 2513 break;
2512 case MPI_EVENT_PERSISTENT_TABLE_FULL: 2514 case MPI_EVENT_PERSISTENT_TABLE_FULL:
2513 INIT_WORK(&ioc->sas_persist_task, 2515 INIT_WORK(&ioc->sas_persist_task,
2514 mptsas_persist_clear_table, 2516 mptsas_persist_clear_table);
2515 (void *)ioc);
2516 schedule_work(&ioc->sas_persist_task); 2517 schedule_work(&ioc->sas_persist_task);
2517 break; 2518 break;
2518 case MPI_EVENT_SAS_DISCOVERY: 2519 case MPI_EVENT_SAS_DISCOVERY:
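
For one-shot events, the driver above allocates a small wrapper (struct mptsas_hotplug_event and friends) that embeds the work_struct together with the event data, so the handler gets everything through one container_of(). A hedged sketch of that allocation pattern; freeing the wrapper at the end of the handler is an assumption about how such one-shot items are normally cleaned up, not something shown in this hunk:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct hotplug_event {
        struct work_struct work;
        u16 handle;             /* event payload travels with the work item */
};

static void hotplug_work(struct work_struct *work)
{
        struct hotplug_event *ev =
                container_of(work, struct hotplug_event, work);

        /* act on ev->handle ... */

        kfree(ev);              /* one-shot: wrapper frees itself (assumed) */
}

static void queue_hotplug_event(u16 handle)
{
        struct hotplug_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

        if (!ev)
                return;

        ev->handle = handle;
        INIT_WORK(&ev->work, hotplug_work);
        schedule_work(&ev->work);
}
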
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e4cc3dd5fc9f..f422c0d0621c 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -646,9 +646,10 @@ struct work_queue_wrapper {
646 int disk; 646 int disk;
647}; 647};
648 648
649static void mpt_work_wrapper(void *data) 649static void mpt_work_wrapper(struct work_struct *work)
650{ 650{
651 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 651 struct work_queue_wrapper *wqw =
652 container_of(work, struct work_queue_wrapper, work);
652 struct _MPT_SCSI_HOST *hd = wqw->hd; 653 struct _MPT_SCSI_HOST *hd = wqw->hd;
653 struct Scsi_Host *shost = hd->ioc->sh; 654 struct Scsi_Host *shost = hd->ioc->sh;
654 struct scsi_device *sdev; 655 struct scsi_device *sdev;
@@ -695,7 +696,7 @@ static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk)
695 disk); 696 disk);
696 return; 697 return;
697 } 698 }
698 INIT_WORK(&wqw->work, mpt_work_wrapper, wqw); 699 INIT_WORK(&wqw->work, mpt_work_wrapper);
699 wqw->hd = hd; 700 wqw->hd = hd;
700 wqw->disk = disk; 701 wqw->disk = disk;
701 702
@@ -784,9 +785,10 @@ MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
784 * renegotiate for a given target 785 * renegotiate for a given target
785 */ 786 */
786static void 787static void
787mptspi_dv_renegotiate_work(void *data) 788mptspi_dv_renegotiate_work(struct work_struct *work)
788{ 789{
789 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 790 struct work_queue_wrapper *wqw =
791 container_of(work, struct work_queue_wrapper, work);
790 struct _MPT_SCSI_HOST *hd = wqw->hd; 792 struct _MPT_SCSI_HOST *hd = wqw->hd;
791 struct scsi_device *sdev; 793 struct scsi_device *sdev;
792 794
@@ -804,7 +806,7 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
804 if (!wqw) 806 if (!wqw)
805 return; 807 return;
806 808
807 INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw); 809 INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
808 wqw->hd = hd; 810 wqw->hd = hd;
809 811
810 schedule_work(&wqw->work); 812 schedule_work(&wqw->work);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 64130227574f..7fc7399bd2ec 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -232,7 +232,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
232 break; 232 break;
233 } 233 }
234 234
235 INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt); 235 INIT_WORK(&evt->work, drv->event);
236 queue_work(drv->event_queue, &evt->work); 236 queue_work(drv->event_queue, &evt->work);
237 return 1; 237 return 1;
238 } 238 }
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index a2350640384b..9e529d8dd5cb 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -371,8 +371,10 @@ static int i2o_exec_remove(struct device *dev)
371 * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY 371 * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY
372 * again, otherwise send LCT NOTIFY to get informed on next LCT change. 372 * again, otherwise send LCT NOTIFY to get informed on next LCT change.
373 */ 373 */
374static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work) 374static void i2o_exec_lct_modified(struct work_struct *_work)
375{ 375{
376 struct i2o_exec_lct_notify_work *work =
377 container_of(_work, struct i2o_exec_lct_notify_work, work);
376 u32 change_ind = 0; 378 u32 change_ind = 0;
377 struct i2o_controller *c = work->c; 379 struct i2o_controller *c = work->c;
378 380
@@ -439,8 +441,7 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
439 441
440 work->c = c; 442 work->c = c;
441 443
442 INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified, 444 INIT_WORK(&work->work, i2o_exec_lct_modified);
443 work);
444 queue_work(i2o_exec_driver.event_queue, &work->work); 445 queue_work(i2o_exec_driver.event_queue, &work->work);
445 return 1; 446 return 1;
446 } 447 }
@@ -460,13 +461,15 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
460 461
461/** 462/**
462 * i2o_exec_event - Event handling function 463 * i2o_exec_event - Event handling function
463 * @evt: Event which occurs 464 * @work: Work item in occurring event
464 * 465 *
465 * Handles events send by the Executive device. At the moment does not do 466 * Handles events send by the Executive device. At the moment does not do
466 * anything useful. 467 * anything useful.
467 */ 468 */
468static void i2o_exec_event(struct i2o_event *evt) 469static void i2o_exec_event(struct work_struct *work)
469{ 470{
471 struct i2o_event *evt = container_of(work, struct i2o_event, work);
472
470 if (likely(evt->i2o_dev)) 473 if (likely(evt->i2o_dev))
471 osm_debug("Event received from device: %d\n", 474 osm_debug("Event received from device: %d\n",
472 evt->i2o_dev->lct_data.tid); 475 evt->i2o_dev->lct_data.tid);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index eaba81bf2eca..70ae00253321 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -419,16 +419,18 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
419 419
420/** 420/**
421 * i2o_block_delayed_request_fn - delayed request queue function 421 * i2o_block_delayed_request_fn - delayed request queue function
422 * delayed_request: the delayed request with the queue to start 422 * @work: the delayed request with the queue to start
423 * 423 *
424 * If the request queue is stopped for a disk, and there is no open 424 * If the request queue is stopped for a disk, and there is no open
425 * request, a new event is created, which calls this function to start 425 * request, a new event is created, which calls this function to start
426 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never 426 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
427 * be started again. 427 * be started again.
428 */ 428 */
429static void i2o_block_delayed_request_fn(void *delayed_request) 429static void i2o_block_delayed_request_fn(struct work_struct *work)
430{ 430{
431 struct i2o_block_delayed_request *dreq = delayed_request; 431 struct i2o_block_delayed_request *dreq =
432 container_of(work, struct i2o_block_delayed_request,
433 work.work);
432 struct request_queue *q = dreq->queue; 434 struct request_queue *q = dreq->queue;
433 unsigned long flags; 435 unsigned long flags;
434 436
@@ -538,8 +540,9 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
538 return 1; 540 return 1;
539}; 541};
540 542
541static void i2o_block_event(struct i2o_event *evt) 543static void i2o_block_event(struct work_struct *work)
542{ 544{
545 struct i2o_event *evt = container_of(work, struct i2o_event, work);
543 osm_debug("event received\n"); 546 osm_debug("event received\n");
544 kfree(evt); 547 kfree(evt);
545}; 548};
@@ -938,8 +941,8 @@ static void i2o_block_request_fn(struct request_queue *q)
938 continue; 941 continue;
939 942
940 dreq->queue = q; 943 dreq->queue = q;
941 INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, 944 INIT_DELAYED_WORK(&dreq->work,
942 dreq); 945 i2o_block_delayed_request_fn);
943 946
944 if (!queue_delayed_work(i2o_block_driver.event_queue, 947 if (!queue_delayed_work(i2o_block_driver.event_queue,
945 &dreq->work, 948 &dreq->work,
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
index 4fdaa5bda412..d9fdc95b440d 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/message/i2o/i2o_block.h
@@ -96,7 +96,7 @@ struct i2o_block_request {
96 96
97/* I2O Block device delayed request */ 97/* I2O Block device delayed request */
98struct i2o_block_delayed_request { 98struct i2o_block_delayed_request {
99 struct work_struct work; 99 struct delayed_work work;
100 struct request_queue *queue; 100 struct request_queue *queue;
101}; 101};
102 102
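Because i2o_block queues this item with a delay, the embedded member changes type here from struct work_struct to struct delayed_work, which wraps a work_struct in a field named work plus a timer; that is why i2o_block_delayed_request_fn above unwinds two levels with container_of(work, ..., work.work) and why the setup site switches to INIT_DELAYED_WORK(). A short sketch of the same shape, with invented names and an assumed 100 ms delay:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_delayed_req {
	struct delayed_work dwork;	/* was: struct work_struct */
	int id;				/* example payload */
};

static void my_delayed_fn(struct work_struct *work)
{
	struct my_delayed_req *dreq =
		container_of(work, struct my_delayed_req, dwork.work);

	printk(KERN_INFO "restarting request %d\n", dreq->id);
}

static void my_arm(struct workqueue_struct *wq, struct my_delayed_req *dreq)
{
	INIT_DELAYED_WORK(&dreq->dwork, my_delayed_fn);
	queue_delayed_work(wq, &dreq->dwork, msecs_to_jiffies(100));
}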
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 1ba8754e9383..2ab7add78f94 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -33,9 +33,10 @@ static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
33 spin_unlock_irqrestore(&fm->lock, flags); 33 spin_unlock_irqrestore(&fm->lock, flags);
34} 34}
35 35
36static void tifm_7xx1_remove_media(void *adapter) 36static void tifm_7xx1_remove_media(struct work_struct *work)
37{ 37{
38 struct tifm_adapter *fm = adapter; 38 struct tifm_adapter *fm =
39 container_of(work, struct tifm_adapter, media_remover);
39 unsigned long flags; 40 unsigned long flags;
40 int cnt; 41 int cnt;
41 struct tifm_dev *sock; 42 struct tifm_dev *sock;
@@ -169,9 +170,10 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
169 return base_addr + ((sock_num + 1) << 10); 170 return base_addr + ((sock_num + 1) << 10);
170} 171}
171 172
172static void tifm_7xx1_insert_media(void *adapter) 173static void tifm_7xx1_insert_media(struct work_struct *work)
173{ 174{
174 struct tifm_adapter *fm = adapter; 175 struct tifm_adapter *fm =
176 container_of(work, struct tifm_adapter, media_inserter);
175 unsigned long flags; 177 unsigned long flags;
176 tifm_media_id media_id; 178 tifm_media_id media_id;
177 char *card_name = "xx"; 179 char *card_name = "xx";
@@ -261,7 +263,7 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
261 spin_unlock_irqrestore(&fm->lock, flags); 263 spin_unlock_irqrestore(&fm->lock, flags);
262 flush_workqueue(fm->wq); 264 flush_workqueue(fm->wq);
263 265
264 tifm_7xx1_remove_media(fm); 266 tifm_7xx1_remove_media(&fm->media_remover);
265 267
266 pci_set_power_state(dev, PCI_D3hot); 268 pci_set_power_state(dev, PCI_D3hot);
267 pci_disable_device(dev); 269 pci_disable_device(dev);
@@ -328,8 +330,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
328 if (!fm->sockets) 330 if (!fm->sockets)
329 goto err_out_free; 331 goto err_out_free;
330 332
331 INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm); 333 INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
332 INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm); 334 INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
333 fm->eject = tifm_7xx1_eject; 335 fm->eject = tifm_7xx1_eject;
334 pci_set_drvdata(dev, fm); 336 pci_set_drvdata(dev, fm);
335 337
@@ -384,7 +386,7 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
384 386
385 flush_workqueue(fm->wq); 387 flush_workqueue(fm->wq);
386 388
387 tifm_7xx1_remove_media(fm); 389 tifm_7xx1_remove_media(&fm->media_remover);
388 390
389 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 391 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
390 free_irq(dev->irq, fm); 392 free_irq(dev->irq, fm);
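tifm_7xx1 also calls its removal handler synchronously from the suspend and remove paths above; with the new prototype such direct calls pass the address of the embedded work item (&fm->media_remover) and the handler gets the adapter back through container_of, so one function serves both the workqueue and the direct call. A hedged sketch of that idiom, with an invented adapter structure:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_adapter {
	int num_sockets;
	struct work_struct media_remover;
};

static void my_remove_media(struct work_struct *work)
{
	struct my_adapter *fm =
		container_of(work, struct my_adapter, media_remover);

	fm->num_sockets = 0;		/* socket tear-down would go here */
}

static void my_suspend(struct my_adapter *fm)
{
	/* Synchronous call: hand the handler its own embedded work item. */
	my_remove_media(&fm->media_remover);
}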
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 9d190022a490..6f2a282e2b97 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -1419,18 +1419,16 @@ static void mmc_setup(struct mmc_host *host)
1419 */ 1419 */
1420void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1420void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1421{ 1421{
1422 if (delay) 1422 mmc_schedule_delayed_work(&host->detect, delay);
1423 mmc_schedule_delayed_work(&host->detect, delay);
1424 else
1425 mmc_schedule_work(&host->detect);
1426} 1423}
1427 1424
1428EXPORT_SYMBOL(mmc_detect_change); 1425EXPORT_SYMBOL(mmc_detect_change);
1429 1426
1430 1427
1431static void mmc_rescan(void *data) 1428static void mmc_rescan(struct work_struct *work)
1432{ 1429{
1433 struct mmc_host *host = data; 1430 struct mmc_host *host =
1431 container_of(work, struct mmc_host, detect.work);
1434 struct list_head *l, *n; 1432 struct list_head *l, *n;
1435 unsigned char power_mode; 1433 unsigned char power_mode;
1436 1434
@@ -1513,7 +1511,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
1513 spin_lock_init(&host->lock); 1511 spin_lock_init(&host->lock);
1514 init_waitqueue_head(&host->wq); 1512 init_waitqueue_head(&host->wq);
1515 INIT_LIST_HEAD(&host->cards); 1513 INIT_LIST_HEAD(&host->cards);
1516 INIT_WORK(&host->detect, mmc_rescan, host); 1514 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
1517 1515
1518 /* 1516 /*
1519 * By default, hosts do not support SGIO or large requests. 1517 * By default, hosts do not support SGIO or large requests.
@@ -1611,7 +1609,7 @@ EXPORT_SYMBOL(mmc_suspend_host);
1611 */ 1609 */
1612int mmc_resume_host(struct mmc_host *host) 1610int mmc_resume_host(struct mmc_host *host)
1613{ 1611{
1614 mmc_rescan(host); 1612 mmc_rescan(&host->detect.work);
1615 1613
1616 return 0; 1614 return 0;
1617} 1615}
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
index cd5e0ab3d84b..149affe0b686 100644
--- a/drivers/mmc/mmc.h
+++ b/drivers/mmc/mmc.h
@@ -20,6 +20,6 @@ void mmc_remove_host_sysfs(struct mmc_host *host);
20void mmc_free_host_sysfs(struct mmc_host *host); 20void mmc_free_host_sysfs(struct mmc_host *host);
21 21
22int mmc_schedule_work(struct work_struct *work); 22int mmc_schedule_work(struct work_struct *work);
23int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay); 23int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay);
24void mmc_flush_scheduled_work(void); 24void mmc_flush_scheduled_work(void);
25#endif 25#endif
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index ac5329636045..e334acd045bc 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -321,17 +321,9 @@ void mmc_free_host_sysfs(struct mmc_host *host)
321static struct workqueue_struct *workqueue; 321static struct workqueue_struct *workqueue;
322 322
323/* 323/*
324 * Internal function. Schedule work in the MMC work queue.
325 */
326int mmc_schedule_work(struct work_struct *work)
327{
328 return queue_work(workqueue, work);
329}
330
331/*
332 * Internal function. Schedule delayed work in the MMC work queue. 324 * Internal function. Schedule delayed work in the MMC work queue.
333 */ 325 */
334int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay) 326int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
335{ 327{
336 return queue_delayed_work(workqueue, work, delay); 328 return queue_delayed_work(workqueue, work, delay);
337} 329}
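In the mmc changes above, host->detect becomes a delayed_work, and scheduling it immediately is just a delay of zero; that is why mmc_detect_change() loses its if (delay) branch and why the mmc_schedule_work() definition is dropped from mmc_sysfs.c. The sketch below restates the idea with invented names: one helper, where 0 jiffies means run as soon as the workqueue gets to it.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct delayed_work my_detect;

static void my_detect_fn(struct work_struct *work)
{
	/* the card rescan would run here */
}

static int my_detect_init(void)
{
	my_wq = create_singlethread_workqueue("my_detect");
	if (!my_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&my_detect, my_detect_fn);
	return 0;
}

static void my_detect_change(unsigned long delay)
{
	/* delay == 0 replaces the old separate "immediate" path */
	queue_delayed_work(my_wq, &my_detect, delay);
}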
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index 0fdc55b08a6d..e846499a004c 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -99,7 +99,7 @@ struct tifm_sd {
99 99
100 struct mmc_request *req; 100 struct mmc_request *req;
101 struct work_struct cmd_handler; 101 struct work_struct cmd_handler;
102 struct work_struct abort_handler; 102 struct delayed_work abort_handler;
103 wait_queue_head_t can_eject; 103 wait_queue_head_t can_eject;
104 104
105 size_t written_blocks; 105 size_t written_blocks;
@@ -496,9 +496,9 @@ err_out:
496 mmc_request_done(mmc, mrq); 496 mmc_request_done(mmc, mrq);
497} 497}
498 498
499static void tifm_sd_end_cmd(void *data) 499static void tifm_sd_end_cmd(struct work_struct *work)
500{ 500{
501 struct tifm_sd *host = data; 501 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
502 struct tifm_dev *sock = host->dev; 502 struct tifm_dev *sock = host->dev;
503 struct mmc_host *mmc = tifm_get_drvdata(sock); 503 struct mmc_host *mmc = tifm_get_drvdata(sock);
504 struct mmc_request *mrq; 504 struct mmc_request *mrq;
@@ -608,9 +608,9 @@ err_out:
608 mmc_request_done(mmc, mrq); 608 mmc_request_done(mmc, mrq);
609} 609}
610 610
611static void tifm_sd_end_cmd_nodma(void *data) 611static void tifm_sd_end_cmd_nodma(struct work_struct *work)
612{ 612{
613 struct tifm_sd *host = (struct tifm_sd*)data; 613 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
614 struct tifm_dev *sock = host->dev; 614 struct tifm_dev *sock = host->dev;
615 struct mmc_host *mmc = tifm_get_drvdata(sock); 615 struct mmc_host *mmc = tifm_get_drvdata(sock);
616 struct mmc_request *mrq; 616 struct mmc_request *mrq;
@@ -661,11 +661,14 @@ static void tifm_sd_end_cmd_nodma(void *data)
661 mmc_request_done(mmc, mrq); 661 mmc_request_done(mmc, mrq);
662} 662}
663 663
664static void tifm_sd_abort(void *data) 664static void tifm_sd_abort(struct work_struct *work)
665{ 665{
666 struct tifm_sd *host =
667 container_of(work, struct tifm_sd, abort_handler.work);
668
666 printk(KERN_ERR DRIVER_NAME 669 printk(KERN_ERR DRIVER_NAME
667 ": card failed to respond for a long period of time"); 670 ": card failed to respond for a long period of time");
668 tifm_eject(((struct tifm_sd*)data)->dev); 671 tifm_eject(host->dev);
669} 672}
670 673
671static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) 674static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -762,9 +765,9 @@ static struct mmc_host_ops tifm_sd_ops = {
762 .get_ro = tifm_sd_ro 765 .get_ro = tifm_sd_ro
763}; 766};
764 767
765static void tifm_sd_register_host(void *data) 768static void tifm_sd_register_host(struct work_struct *work)
766{ 769{
767 struct tifm_sd *host = (struct tifm_sd*)data; 770 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
768 struct tifm_dev *sock = host->dev; 771 struct tifm_dev *sock = host->dev;
769 struct mmc_host *mmc = tifm_get_drvdata(sock); 772 struct mmc_host *mmc = tifm_get_drvdata(sock);
770 unsigned long flags; 773 unsigned long flags;
@@ -772,8 +775,7 @@ static void tifm_sd_register_host(void *data)
772 spin_lock_irqsave(&sock->lock, flags); 775 spin_lock_irqsave(&sock->lock, flags);
773 host->flags |= HOST_REG; 776 host->flags |= HOST_REG;
774 PREPARE_WORK(&host->cmd_handler, 777 PREPARE_WORK(&host->cmd_handler,
775 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd, 778 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
776 data);
777 spin_unlock_irqrestore(&sock->lock, flags); 779 spin_unlock_irqrestore(&sock->lock, flags);
778 dev_dbg(&sock->dev, "adding host\n"); 780 dev_dbg(&sock->dev, "adding host\n");
779 mmc_add_host(mmc); 781 mmc_add_host(mmc);
@@ -799,8 +801,8 @@ static int tifm_sd_probe(struct tifm_dev *sock)
799 host->dev = sock; 801 host->dev = sock;
800 host->clk_div = 61; 802 host->clk_div = 61;
801 init_waitqueue_head(&host->can_eject); 803 init_waitqueue_head(&host->can_eject);
802 INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host); 804 INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
803 INIT_WORK(&host->abort_handler, tifm_sd_abort, host); 805 INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
804 806
805 tifm_set_drvdata(sock, mmc); 807 tifm_set_drvdata(sock, mmc);
806 sock->signal_irq = tifm_sd_signal_irq; 808 sock->signal_irq = tifm_sd_signal_irq;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d02ed51abfcc..931028f672de 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -594,7 +594,7 @@ struct rtl8139_private {
594 u32 rx_config; 594 u32 rx_config;
595 struct rtl_extra_stats xstats; 595 struct rtl_extra_stats xstats;
596 596
597 struct work_struct thread; 597 struct delayed_work thread;
598 598
599 struct mii_if_info mii; 599 struct mii_if_info mii;
600 unsigned int regs_len; 600 unsigned int regs_len;
@@ -636,8 +636,8 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
636static void rtl8139_set_rx_mode (struct net_device *dev); 636static void rtl8139_set_rx_mode (struct net_device *dev);
637static void __set_rx_mode (struct net_device *dev); 637static void __set_rx_mode (struct net_device *dev);
638static void rtl8139_hw_start (struct net_device *dev); 638static void rtl8139_hw_start (struct net_device *dev);
639static void rtl8139_thread (void *_data); 639static void rtl8139_thread (struct work_struct *work);
640static void rtl8139_tx_timeout_task(void *_data); 640static void rtl8139_tx_timeout_task(struct work_struct *work);
641static const struct ethtool_ops rtl8139_ethtool_ops; 641static const struct ethtool_ops rtl8139_ethtool_ops;
642 642
643/* write MMIO register, with flush */ 643/* write MMIO register, with flush */
@@ -1010,7 +1010,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1010 (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); 1010 (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
1011 spin_lock_init (&tp->lock); 1011 spin_lock_init (&tp->lock);
1012 spin_lock_init (&tp->rx_lock); 1012 spin_lock_init (&tp->rx_lock);
1013 INIT_WORK(&tp->thread, rtl8139_thread, dev); 1013 INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
1014 tp->mii.dev = dev; 1014 tp->mii.dev = dev;
1015 tp->mii.mdio_read = mdio_read; 1015 tp->mii.mdio_read = mdio_read;
1016 tp->mii.mdio_write = mdio_write; 1016 tp->mii.mdio_write = mdio_write;
@@ -1596,15 +1596,16 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
1596 RTL_R8 (Config1)); 1596 RTL_R8 (Config1));
1597} 1597}
1598 1598
1599static void rtl8139_thread (void *_data) 1599static void rtl8139_thread (struct work_struct *work)
1600{ 1600{
1601 struct net_device *dev = _data; 1601 struct rtl8139_private *tp =
1602 struct rtl8139_private *tp = netdev_priv(dev); 1602 container_of(work, struct rtl8139_private, thread.work);
1603 struct net_device *dev = tp->mii.dev;
1603 unsigned long thr_delay = next_tick; 1604 unsigned long thr_delay = next_tick;
1604 1605
1605 if (tp->watchdog_fired) { 1606 if (tp->watchdog_fired) {
1606 tp->watchdog_fired = 0; 1607 tp->watchdog_fired = 0;
1607 rtl8139_tx_timeout_task(_data); 1608 rtl8139_tx_timeout_task(work);
1608 } else if (rtnl_trylock()) { 1609 } else if (rtnl_trylock()) {
1609 rtl8139_thread_iter (dev, tp, tp->mmio_addr); 1610 rtl8139_thread_iter (dev, tp, tp->mmio_addr);
1610 rtnl_unlock (); 1611 rtnl_unlock ();
@@ -1646,10 +1647,11 @@ static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
1646 /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ 1647 /* XXX account for unsent Tx packets in tp->stats.tx_dropped */
1647} 1648}
1648 1649
1649static void rtl8139_tx_timeout_task (void *_data) 1650static void rtl8139_tx_timeout_task (struct work_struct *work)
1650{ 1651{
1651 struct net_device *dev = _data; 1652 struct rtl8139_private *tp =
1652 struct rtl8139_private *tp = netdev_priv(dev); 1653 container_of(work, struct rtl8139_private, thread.work);
1654 struct net_device *dev = tp->mii.dev;
1653 void __iomem *ioaddr = tp->mmio_addr; 1655 void __iomem *ioaddr = tp->mmio_addr;
1654 int i; 1656 int i;
1655 u8 tmp8; 1657 u8 tmp8;
@@ -1695,7 +1697,7 @@ static void rtl8139_tx_timeout (struct net_device *dev)
1695 struct rtl8139_private *tp = netdev_priv(dev); 1697 struct rtl8139_private *tp = netdev_priv(dev);
1696 1698
1697 if (!tp->have_thread) { 1699 if (!tp->have_thread) {
1698 INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev); 1700 INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task);
1699 schedule_delayed_work(&tp->thread, next_tick); 1701 schedule_delayed_work(&tp->thread, next_tick);
1700 } else 1702 } else
1701 tp->watchdog_fired = 1; 1703 tp->watchdog_fired = 1;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index fc2f1d1c7ead..5bacb7587df4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4411,9 +4411,9 @@ bnx2_open(struct net_device *dev)
4411} 4411}
4412 4412
4413static void 4413static void
4414bnx2_reset_task(void *data) 4414bnx2_reset_task(struct work_struct *work)
4415{ 4415{
4416 struct bnx2 *bp = data; 4416 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4417 4417
4418 if (!netif_running(bp->dev)) 4418 if (!netif_running(bp->dev))
4419 return; 4419 return;
@@ -5702,7 +5702,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5702 bp->pdev = pdev; 5702 bp->pdev = pdev;
5703 5703
5704 spin_lock_init(&bp->phy_lock); 5704 spin_lock_init(&bp->phy_lock);
5705 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp); 5705 INIT_WORK(&bp->reset_task, bnx2_reset_task);
5706 5706
5707 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 5707 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5708 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); 5708 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index fd2cc13f7d97..c8126484c2be 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4066,9 +4066,9 @@ static int cas_alloc_rxds(struct cas *cp)
4066 return 0; 4066 return 0;
4067} 4067}
4068 4068
4069static void cas_reset_task(void *data) 4069static void cas_reset_task(struct work_struct *work)
4070{ 4070{
4071 struct cas *cp = (struct cas *) data; 4071 struct cas *cp = container_of(work, struct cas, reset_task);
4072#if 0 4072#if 0
4073 int pending = atomic_read(&cp->reset_task_pending); 4073 int pending = atomic_read(&cp->reset_task_pending);
4074#else 4074#else
@@ -5006,7 +5006,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5006 atomic_set(&cp->reset_task_pending_spare, 0); 5006 atomic_set(&cp->reset_task_pending_spare, 0);
5007 atomic_set(&cp->reset_task_pending_mtu, 0); 5007 atomic_set(&cp->reset_task_pending_mtu, 0);
5008#endif 5008#endif
5009 INIT_WORK(&cp->reset_task, cas_reset_task, cp); 5009 INIT_WORK(&cp->reset_task, cas_reset_task);
5010 5010
5011 /* Default link parameters */ 5011 /* Default link parameters */
5012 if (link_mode >= 0 && link_mode <= 6) 5012 if (link_mode >= 0 && link_mode <= 6)
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index b265941e1372..74758d2c7af8 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -279,7 +279,7 @@ struct adapter {
279 struct petp *tp; 279 struct petp *tp;
280 280
281 struct port_info port[MAX_NPORTS]; 281 struct port_info port[MAX_NPORTS];
282 struct work_struct stats_update_task; 282 struct delayed_work stats_update_task;
283 struct timer_list stats_update_timer; 283 struct timer_list stats_update_timer;
284 284
285 spinlock_t tpi_lock; 285 spinlock_t tpi_lock;
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 53bec6739812..de48eadddbc4 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -953,10 +953,11 @@ static void t1_netpoll(struct net_device *dev)
953 * Periodic accumulation of MAC statistics. This is used only if the MAC 953 * Periodic accumulation of MAC statistics. This is used only if the MAC
954 * does not have any other way to prevent stats counter overflow. 954 * does not have any other way to prevent stats counter overflow.
955 */ 955 */
956static void mac_stats_task(void *data) 956static void mac_stats_task(struct work_struct *work)
957{ 957{
958 int i; 958 int i;
959 struct adapter *adapter = data; 959 struct adapter *adapter =
960 container_of(work, struct adapter, stats_update_task.work);
960 961
961 for_each_port(adapter, i) { 962 for_each_port(adapter, i) {
962 struct port_info *p = &adapter->port[i]; 963 struct port_info *p = &adapter->port[i];
@@ -977,9 +978,10 @@ static void mac_stats_task(void *data)
977/* 978/*
978 * Processes elmer0 external interrupts in process context. 979 * Processes elmer0 external interrupts in process context.
979 */ 980 */
980static void ext_intr_task(void *data) 981static void ext_intr_task(struct work_struct *work)
981{ 982{
982 struct adapter *adapter = data; 983 struct adapter *adapter =
984 container_of(work, struct adapter, ext_intr_handler_task);
983 985
984 t1_elmer0_ext_intr_handler(adapter); 986 t1_elmer0_ext_intr_handler(adapter);
985 987
@@ -1113,9 +1115,9 @@ static int __devinit init_one(struct pci_dev *pdev,
1113 spin_lock_init(&adapter->mac_lock); 1115 spin_lock_init(&adapter->mac_lock);
1114 1116
1115 INIT_WORK(&adapter->ext_intr_handler_task, 1117 INIT_WORK(&adapter->ext_intr_handler_task,
1116 ext_intr_task, adapter); 1118 ext_intr_task);
1117 INIT_WORK(&adapter->stats_update_task, mac_stats_task, 1119 INIT_DELAYED_WORK(&adapter->stats_update_task,
1118 adapter); 1120 mac_stats_task);
1119 1121
1120 pci_set_drvdata(pdev, netdev); 1122 pci_set_drvdata(pdev, netdev);
1121 } 1123 }
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3a8df479cbda..03bf164f9e8d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2102,9 +2102,10 @@ static void e100_tx_timeout(struct net_device *netdev)
2102 schedule_work(&nic->tx_timeout_task); 2102 schedule_work(&nic->tx_timeout_task);
2103} 2103}
2104 2104
2105static void e100_tx_timeout_task(struct net_device *netdev) 2105static void e100_tx_timeout_task(struct work_struct *work)
2106{ 2106{
2107 struct nic *nic = netdev_priv(netdev); 2107 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2108 struct net_device *netdev = nic->netdev;
2108 2109
2109 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", 2110 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2110 readb(&nic->csr->scb.status)); 2111 readb(&nic->csr->scb.status));
@@ -2637,8 +2638,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2637 nic->blink_timer.function = e100_blink_led; 2638 nic->blink_timer.function = e100_blink_led;
2638 nic->blink_timer.data = (unsigned long)nic; 2639 nic->blink_timer.data = (unsigned long)nic;
2639 2640
2640 INIT_WORK(&nic->tx_timeout_task, 2641 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2641 (void (*)(void *))e100_tx_timeout_task, netdev);
2642 2642
2643 if((err = e100_alloc(nic))) { 2643 if((err = e100_alloc(nic))) {
2644 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); 2644 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
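The e100 hunk above also drops a (void (*)(void *)) cast on the handler, as the i2o dispatch did earlier and e1000, ixgb and s2io do below. With the old API such casts were how mismatched handler prototypes were forced past INIT_WORK(); the new INIT_WORK() takes a typed work_func_t, defined as void (*)(struct work_struct *), so a wrong prototype now fails to compile instead of being cast away. A small sketch of the typed form, with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_nic {
	int resets;
	struct work_struct tx_timeout_task;
};

static void my_tx_timeout_task(struct work_struct *work)
{
	struct my_nic *nic =
		container_of(work, struct my_nic, tx_timeout_task);

	nic->resets++;			/* adapter reset would run here */
}

static void my_probe_setup(struct my_nic *nic)
{
	/* No function-pointer cast: only a work_func_t is accepted. */
	INIT_WORK(&nic->tx_timeout_task, my_tx_timeout_task);
}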
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 32dde0adb683..73f3a85fd238 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -190,7 +190,7 @@ void e1000_set_ethtool_ops(struct net_device *netdev);
190static void e1000_enter_82542_rst(struct e1000_adapter *adapter); 190static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
191static void e1000_leave_82542_rst(struct e1000_adapter *adapter); 191static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
192static void e1000_tx_timeout(struct net_device *dev); 192static void e1000_tx_timeout(struct net_device *dev);
193static void e1000_reset_task(struct net_device *dev); 193static void e1000_reset_task(struct work_struct *work);
194static void e1000_smartspeed(struct e1000_adapter *adapter); 194static void e1000_smartspeed(struct e1000_adapter *adapter);
195static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 195static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
196 struct sk_buff *skb); 196 struct sk_buff *skb);
@@ -914,8 +914,7 @@ e1000_probe(struct pci_dev *pdev,
914 adapter->phy_info_timer.function = &e1000_update_phy_info; 914 adapter->phy_info_timer.function = &e1000_update_phy_info;
915 adapter->phy_info_timer.data = (unsigned long) adapter; 915 adapter->phy_info_timer.data = (unsigned long) adapter;
916 916
917 INIT_WORK(&adapter->reset_task, 917 INIT_WORK(&adapter->reset_task, e1000_reset_task);
918 (void (*)(void *))e1000_reset_task, netdev);
919 918
920 e1000_check_options(adapter); 919 e1000_check_options(adapter);
921 920
@@ -3306,9 +3305,10 @@ e1000_tx_timeout(struct net_device *netdev)
3306} 3305}
3307 3306
3308static void 3307static void
3309e1000_reset_task(struct net_device *netdev) 3308e1000_reset_task(struct work_struct *work)
3310{ 3309{
3311 struct e1000_adapter *adapter = netdev_priv(netdev); 3310 struct e1000_adapter *adapter =
3311 container_of(work, struct e1000_adapter, reset_task);
3312 3312
3313 e1000_reinit_locked(adapter); 3313 e1000_reinit_locked(adapter);
3314} 3314}
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 6ad696101418..83fa32f72398 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2224,11 +2224,12 @@ static int ehea_stop(struct net_device *dev)
2224 return ret; 2224 return ret;
2225} 2225}
2226 2226
2227static void ehea_reset_port(void *data) 2227static void ehea_reset_port(struct work_struct *work)
2228{ 2228{
2229 int ret; 2229 int ret;
2230 struct net_device *dev = data; 2230 struct ehea_port *port =
2231 struct ehea_port *port = netdev_priv(dev); 2231 container_of(work, struct ehea_port, reset_task);
2232 struct net_device *dev = port->netdev;
2232 2233
2233 port->resets++; 2234 port->resets++;
2234 down(&port->port_lock); 2235 down(&port->port_lock);
@@ -2379,7 +2380,7 @@ static int ehea_setup_single_port(struct ehea_port *port,
2379 dev->tx_timeout = &ehea_tx_watchdog; 2380 dev->tx_timeout = &ehea_tx_watchdog;
2380 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 2381 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
2381 2382
2382 INIT_WORK(&port->reset_task, ehea_reset_port, dev); 2383 INIT_WORK(&port->reset_task, ehea_reset_port);
2383 2384
2384 ehea_set_ethtool_ops(dev); 2385 ehea_set_ethtool_ops(dev);
2385 2386
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 1ed9cccd3c11..3c33d6f6a6a6 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -168,8 +168,9 @@ struct baycom_state {
168 int magic; 168 int magic;
169 169
170 struct pardevice *pdev; 170 struct pardevice *pdev;
171 struct net_device *dev;
171 unsigned int work_running; 172 unsigned int work_running;
172 struct work_struct run_work; 173 struct delayed_work run_work;
173 unsigned int modem; 174 unsigned int modem;
174 unsigned int bitrate; 175 unsigned int bitrate;
175 unsigned char stat; 176 unsigned char stat;
@@ -659,16 +660,18 @@ static int receive(struct net_device *dev, int cnt)
659#define GETTICK(x) 660#define GETTICK(x)
660#endif /* __i386__ */ 661#endif /* __i386__ */
661 662
662static void epp_bh(struct net_device *dev) 663static void epp_bh(struct work_struct *work)
663{ 664{
665 struct net_device *dev;
664 struct baycom_state *bc; 666 struct baycom_state *bc;
665 struct parport *pp; 667 struct parport *pp;
666 unsigned char stat; 668 unsigned char stat;
667 unsigned char tmp[2]; 669 unsigned char tmp[2];
668 unsigned int time1 = 0, time2 = 0, time3 = 0; 670 unsigned int time1 = 0, time2 = 0, time3 = 0;
669 int cnt, cnt2; 671 int cnt, cnt2;
670 672
671 bc = netdev_priv(dev); 673 bc = container_of(work, struct baycom_state, run_work.work);
674 dev = bc->dev;
672 if (!bc->work_running) 675 if (!bc->work_running)
673 return; 676 return;
674 baycom_int_freq(bc); 677 baycom_int_freq(bc);
@@ -889,7 +892,7 @@ static int epp_open(struct net_device *dev)
889 return -EBUSY; 892 return -EBUSY;
890 } 893 }
891 dev->irq = /*pp->irq*/ 0; 894 dev->irq = /*pp->irq*/ 0;
892 INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev); 895 INIT_DELAYED_WORK(&bc->run_work, epp_bh);
893 bc->work_running = 1; 896 bc->work_running = 1;
894 bc->modem = EPP_CONVENTIONAL; 897 bc->modem = EPP_CONVENTIONAL;
895 if (eppconfig(bc)) 898 if (eppconfig(bc))
@@ -1213,6 +1216,7 @@ static void __init baycom_epp_dev_setup(struct net_device *dev)
1213 /* 1216 /*
1214 * initialize part of the baycom_state struct 1217 * initialize part of the baycom_state struct
1215 */ 1218 */
1219 bc->dev = dev;
1216 bc->magic = BAYCOM_MAGIC; 1220 bc->magic = BAYCOM_MAGIC;
1217 bc->cfg.fclk = 19666600; 1221 bc->cfg.fclk = 19666600;
1218 bc->cfg.bps = 9600; 1222 bc->cfg.bps = 9600;
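baycom_epp shows the usual cost of the conversion when the old callback argument was the net_device rather than the private state: the private struct gains a back-pointer (bc->dev, filled in at setup) so the handler can still reach the net_device after recovering its own struct from the work pointer. 8139too relied on the existing tp->mii.dev field above, and ns83820, xirc2ps_cs, plip and r8169 below add the same kind of field. A sketch of the back-pointer idiom, with invented names and netif_wake_queue() standing in for the real bottom-half work:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
	struct net_device *dev;		/* back-pointer set at init time */
	struct delayed_work run_work;
};

static void my_bh(struct work_struct *work)
{
	struct my_priv *p = container_of(work, struct my_priv, run_work.work);
	struct net_device *dev = p->dev;

	netif_wake_queue(dev);		/* stand-in for the real handler body */
}

static void my_netdev_setup(struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);

	p->dev = dev;			/* the back-pointer my_bh relies on */
	INIT_DELAYED_WORK(&p->run_work, my_bh);
}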
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b32c52ed19d7..f0c61f3b2a82 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -560,9 +560,9 @@ static inline int mcs_find_endpoints(struct mcs_cb *mcs,
560 return ret; 560 return ret;
561} 561}
562 562
563static void mcs_speed_work(void *arg) 563static void mcs_speed_work(struct work_struct *work)
564{ 564{
565 struct mcs_cb *mcs = arg; 565 struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
566 struct net_device *netdev = mcs->netdev; 566 struct net_device *netdev = mcs->netdev;
567 567
568 mcs_speed_change(mcs); 568 mcs_speed_change(mcs);
@@ -927,7 +927,7 @@ static int mcs_probe(struct usb_interface *intf,
927 irda_qos_bits_to_value(&mcs->qos); 927 irda_qos_bits_to_value(&mcs->qos);
928 928
929 /* Speed change work initialisation*/ 929 /* Speed change work initialisation*/
930 INIT_WORK(&mcs->work, mcs_speed_work, mcs); 930 INIT_WORK(&mcs->work, mcs_speed_work);
931 931
932 /* Override the network functions we need to use */ 932 /* Override the network functions we need to use */
933 ndev->hard_start_xmit = mcs_hard_xmit; 933 ndev->hard_start_xmit = mcs_hard_xmit;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 9fa294a546d6..2a57bc67ce35 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -22,7 +22,7 @@
22 22
23struct sir_fsm { 23struct sir_fsm {
24 struct semaphore sem; 24 struct semaphore sem;
25 struct work_struct work; 25 struct delayed_work work;
26 unsigned state, substate; 26 unsigned state, substate;
27 int param; 27 int param;
28 int result; 28 int result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 3b5854d10c17..17b0c3ab6201 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -100,9 +100,9 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
100 * Both must be unlocked/restarted on completion - but only on final exit. 100 * Both must be unlocked/restarted on completion - but only on final exit.
101 */ 101 */
102 102
103static void sirdev_config_fsm(void *data) 103static void sirdev_config_fsm(struct work_struct *work)
104{ 104{
105 struct sir_dev *dev = data; 105 struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
106 struct sir_fsm *fsm = &dev->fsm; 106 struct sir_fsm *fsm = &dev->fsm;
107 int next_state; 107 int next_state;
108 int ret = -1; 108 int ret = -1;
@@ -309,8 +309,8 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
309 fsm->param = param; 309 fsm->param = param;
310 fsm->result = 0; 310 fsm->result = 0;
311 311
312 INIT_WORK(&fsm->work, sirdev_config_fsm, dev); 312 INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
313 queue_work(irda_sir_wq, &fsm->work); 313 queue_delayed_work(irda_sir_wq, &fsm->work, 0);
314 return 0; 314 return 0;
315} 315}
316 316
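In sir_dev the work item sits one level further down: struct sir_dev embeds a struct sir_fsm, which embeds the delayed_work, so the recovery expression names the nested path fsm.work.work; container_of() accepts any member designator that is a valid path from the outer type. The unconditional queue_work() caller likewise becomes queue_delayed_work() with a zero delay. A compact sketch of the nested form, names invented:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_fsm {
	int state;
	struct delayed_work work;
};

struct my_dev {
	struct my_fsm fsm;		/* the work item lives two levels down */
};

static void my_config_fsm(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, fsm.work.work);

	dev->fsm.state++;		/* one state-machine step would go here */
}

static void my_schedule(struct workqueue_struct *wq, struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->fsm.work, my_config_fsm);
	queue_delayed_work(wq, &dev->fsm.work, 0);	/* 0 = run as soon as possible */
}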
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 2284e2ce1692..d6f4f185bf37 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -166,7 +166,7 @@ struct veth_msg {
166 166
167struct veth_lpar_connection { 167struct veth_lpar_connection {
168 HvLpIndex remote_lp; 168 HvLpIndex remote_lp;
169 struct work_struct statemachine_wq; 169 struct delayed_work statemachine_wq;
170 struct veth_msg *msgs; 170 struct veth_msg *msgs;
171 int num_events; 171 int num_events;
172 struct veth_cap_data local_caps; 172 struct veth_cap_data local_caps;
@@ -456,7 +456,7 @@ static struct kobj_type veth_port_ktype = {
456 456
457static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx) 457static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
458{ 458{
459 schedule_work(&cnx->statemachine_wq); 459 schedule_delayed_work(&cnx->statemachine_wq, 0);
460} 460}
461 461
462static void veth_take_cap(struct veth_lpar_connection *cnx, 462static void veth_take_cap(struct veth_lpar_connection *cnx,
@@ -638,9 +638,11 @@ static int veth_process_caps(struct veth_lpar_connection *cnx)
638} 638}
639 639
640/* FIXME: The gotos here are a bit dubious */ 640/* FIXME: The gotos here are a bit dubious */
641static void veth_statemachine(void *p) 641static void veth_statemachine(struct work_struct *work)
642{ 642{
643 struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p; 643 struct veth_lpar_connection *cnx =
644 container_of(work, struct veth_lpar_connection,
645 statemachine_wq.work);
644 int rlp = cnx->remote_lp; 646 int rlp = cnx->remote_lp;
645 int rc; 647 int rc;
646 648
@@ -827,7 +829,7 @@ static int veth_init_connection(u8 rlp)
827 829
828 cnx->remote_lp = rlp; 830 cnx->remote_lp = rlp;
829 spin_lock_init(&cnx->lock); 831 spin_lock_init(&cnx->lock);
830 INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx); 832 INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
831 833
832 init_timer(&cnx->ack_timer); 834 init_timer(&cnx->ack_timer);
833 cnx->ack_timer.function = veth_timed_ack; 835 cnx->ack_timer.function = veth_timed_ack;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7b127212e62b..e628126c9c49 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -106,7 +106,7 @@ static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
106static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); 106static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
107void ixgb_set_ethtool_ops(struct net_device *netdev); 107void ixgb_set_ethtool_ops(struct net_device *netdev);
108static void ixgb_tx_timeout(struct net_device *dev); 108static void ixgb_tx_timeout(struct net_device *dev);
109static void ixgb_tx_timeout_task(struct net_device *dev); 109static void ixgb_tx_timeout_task(struct work_struct *work);
110static void ixgb_vlan_rx_register(struct net_device *netdev, 110static void ixgb_vlan_rx_register(struct net_device *netdev,
111 struct vlan_group *grp); 111 struct vlan_group *grp);
112static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 112static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -489,8 +489,7 @@ ixgb_probe(struct pci_dev *pdev,
489 adapter->watchdog_timer.function = &ixgb_watchdog; 489 adapter->watchdog_timer.function = &ixgb_watchdog;
490 adapter->watchdog_timer.data = (unsigned long)adapter; 490 adapter->watchdog_timer.data = (unsigned long)adapter;
491 491
492 INIT_WORK(&adapter->tx_timeout_task, 492 INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
493 (void (*)(void *))ixgb_tx_timeout_task, netdev);
494 493
495 strcpy(netdev->name, "eth%d"); 494 strcpy(netdev->name, "eth%d");
496 if((err = register_netdev(netdev))) 495 if((err = register_netdev(netdev)))
@@ -1493,9 +1492,10 @@ ixgb_tx_timeout(struct net_device *netdev)
1493} 1492}
1494 1493
1495static void 1494static void
1496ixgb_tx_timeout_task(struct net_device *netdev) 1495ixgb_tx_timeout_task(struct work_struct *work)
1497{ 1496{
1498 struct ixgb_adapter *adapter = netdev_priv(netdev); 1497 struct ixgb_adapter *adapter =
1498 container_of(work, struct ixgb_adapter, tx_timeout_task);
1499 1499
1500 adapter->tx_timeout_count++; 1500 adapter->tx_timeout_count++;
1501 ixgb_down(adapter, TRUE); 1501 ixgb_down(adapter, TRUE);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 36350e6db1c1..38df42802386 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2615,9 +2615,10 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
2615 * This watchdog is used to check whether the board has suffered 2615 * This watchdog is used to check whether the board has suffered
2616 * from a parity error and needs to be recovered. 2616 * from a parity error and needs to be recovered.
2617 */ 2617 */
2618static void myri10ge_watchdog(void *arg) 2618static void myri10ge_watchdog(struct work_struct *work)
2619{ 2619{
2620 struct myri10ge_priv *mgp = arg; 2620 struct myri10ge_priv *mgp =
2621 container_of(work, struct myri10ge_priv, watchdog_work);
2621 u32 reboot; 2622 u32 reboot;
2622 int status; 2623 int status;
2623 u16 cmd, vendor; 2624 u16 cmd, vendor;
@@ -2887,7 +2888,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2887 (unsigned long)mgp); 2888 (unsigned long)mgp);
2888 2889
2889 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); 2890 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
2890 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp); 2891 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
2891 status = register_netdev(netdev); 2892 status = register_netdev(netdev);
2892 if (status != 0) { 2893 if (status != 0) {
2893 dev_err(&pdev->dev, "register_netdev failed: %d\n", status); 2894 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index b0127c71a5b6..312e0e331712 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -427,6 +427,7 @@ struct ns83820 {
427 u8 __iomem *base; 427 u8 __iomem *base;
428 428
429 struct pci_dev *pci_dev; 429 struct pci_dev *pci_dev;
430 struct net_device *ndev;
430 431
431#ifdef NS83820_VLAN_ACCEL_SUPPORT 432#ifdef NS83820_VLAN_ACCEL_SUPPORT
432 struct vlan_group *vlgrp; 433 struct vlan_group *vlgrp;
@@ -631,10 +632,10 @@ static void fastcall rx_refill_atomic(struct net_device *ndev)
631} 632}
632 633
633/* REFILL */ 634/* REFILL */
634static inline void queue_refill(void *_dev) 635static inline void queue_refill(struct work_struct *work)
635{ 636{
636 struct net_device *ndev = _dev; 637 struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
637 struct ns83820 *dev = PRIV(ndev); 638 struct net_device *ndev = dev->ndev;
638 639
639 rx_refill(ndev, GFP_KERNEL); 640 rx_refill(ndev, GFP_KERNEL);
640 if (dev->rx_info.up) 641 if (dev->rx_info.up)
@@ -1841,6 +1842,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1841 1842
1842 ndev = alloc_etherdev(sizeof(struct ns83820)); 1843 ndev = alloc_etherdev(sizeof(struct ns83820));
1843 dev = PRIV(ndev); 1844 dev = PRIV(ndev);
1845 dev->ndev = ndev;
1844 err = -ENOMEM; 1846 err = -ENOMEM;
1845 if (!dev) 1847 if (!dev)
1846 goto out; 1848 goto out;
@@ -1853,7 +1855,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1853 SET_MODULE_OWNER(ndev); 1855 SET_MODULE_OWNER(ndev);
1854 SET_NETDEV_DEV(ndev, &pci_dev->dev); 1856 SET_NETDEV_DEV(ndev, &pci_dev->dev);
1855 1857
1856 INIT_WORK(&dev->tq_refill, queue_refill, ndev); 1858 INIT_WORK(&dev->tq_refill, queue_refill);
1857 tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); 1859 tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
1858 1860
1859 err = pci_enable_device(pci_dev); 1861 err = pci_enable_device(pci_dev);
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index f3914f58d67f..5de8850f2323 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -332,6 +332,7 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
332 */ 332 */
333 333
334typedef struct local_info_t { 334typedef struct local_info_t {
335 struct net_device *dev;
335 struct pcmcia_device *p_dev; 336 struct pcmcia_device *p_dev;
336 dev_node_t node; 337 dev_node_t node;
337 struct net_device_stats stats; 338 struct net_device_stats stats;
@@ -353,7 +354,7 @@ typedef struct local_info_t {
353 */ 354 */
354static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); 355static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
355static void do_tx_timeout(struct net_device *dev); 356static void do_tx_timeout(struct net_device *dev);
356static void xirc2ps_tx_timeout_task(void *data); 357static void xirc2ps_tx_timeout_task(struct work_struct *work);
357static struct net_device_stats *do_get_stats(struct net_device *dev); 358static struct net_device_stats *do_get_stats(struct net_device *dev);
358static void set_addresses(struct net_device *dev); 359static void set_addresses(struct net_device *dev);
359static void set_multicast_list(struct net_device *dev); 360static void set_multicast_list(struct net_device *dev);
@@ -567,6 +568,7 @@ xirc2ps_probe(struct pcmcia_device *link)
567 if (!dev) 568 if (!dev)
568 return -ENOMEM; 569 return -ENOMEM;
569 local = netdev_priv(dev); 570 local = netdev_priv(dev);
571 local->dev = dev;
570 local->p_dev = link; 572 local->p_dev = link;
571 link->priv = dev; 573 link->priv = dev;
572 574
@@ -591,7 +593,7 @@ xirc2ps_probe(struct pcmcia_device *link)
591#ifdef HAVE_TX_TIMEOUT 593#ifdef HAVE_TX_TIMEOUT
592 dev->tx_timeout = do_tx_timeout; 594 dev->tx_timeout = do_tx_timeout;
593 dev->watchdog_timeo = TX_TIMEOUT; 595 dev->watchdog_timeo = TX_TIMEOUT;
594 INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); 596 INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
595#endif 597#endif
596 598
597 return xirc2ps_config(link); 599 return xirc2ps_config(link);
@@ -1344,9 +1346,11 @@ xirc2ps_interrupt(int irq, void *dev_id)
1344/*====================================================================*/ 1346/*====================================================================*/
1345 1347
1346static void 1348static void
1347xirc2ps_tx_timeout_task(void *data) 1349xirc2ps_tx_timeout_task(struct work_struct *work)
1348{ 1350{
1349 struct net_device *dev = data; 1351 local_info_t *local =
1352 container_of(work, local_info_t, tx_timeout_task);
1353 struct net_device *dev = local->dev;
1350 /* reset the card */ 1354 /* reset the card */
1351 do_reset(dev,1); 1355 do_reset(dev,1);
1352 dev->trans_start = jiffies; 1356 dev->trans_start = jiffies;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 88237bdb5255..4044bb1ada86 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -397,7 +397,7 @@ out_unlock:
397EXPORT_SYMBOL(phy_start_aneg); 397EXPORT_SYMBOL(phy_start_aneg);
398 398
399 399
400static void phy_change(void *data); 400static void phy_change(struct work_struct *work);
401static void phy_timer(unsigned long data); 401static void phy_timer(unsigned long data);
402 402
403/* phy_start_machine: 403/* phy_start_machine:
@@ -555,7 +555,7 @@ int phy_start_interrupts(struct phy_device *phydev)
555{ 555{
556 int err = 0; 556 int err = 0;
557 557
558 INIT_WORK(&phydev->phy_queue, phy_change, phydev); 558 INIT_WORK(&phydev->phy_queue, phy_change);
559 559
560 if (request_irq(phydev->irq, phy_interrupt, 560 if (request_irq(phydev->irq, phy_interrupt,
561 IRQF_SHARED, 561 IRQF_SHARED,
@@ -598,10 +598,11 @@ EXPORT_SYMBOL(phy_stop_interrupts);
598 598
599 599
600/* Scheduled by the phy_interrupt/timer to handle PHY changes */ 600/* Scheduled by the phy_interrupt/timer to handle PHY changes */
601static void phy_change(void *data) 601static void phy_change(struct work_struct *work)
602{ 602{
603 int err; 603 int err;
604 struct phy_device *phydev = data; 604 struct phy_device *phydev =
605 container_of(work, struct phy_device, phy_queue);
605 606
606 err = phy_disable_interrupts(phydev); 607 err = phy_disable_interrupts(phydev);
607 608
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 71afb274498f..6bb085f54437 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -138,9 +138,9 @@ static const unsigned int net_debug = NET_DEBUG;
138#define PLIP_NIBBLE_WAIT 3000 138#define PLIP_NIBBLE_WAIT 3000
139 139
140/* Bottom halves */ 140/* Bottom halves */
141static void plip_kick_bh(struct net_device *dev); 141static void plip_kick_bh(struct work_struct *work);
142static void plip_bh(struct net_device *dev); 142static void plip_bh(struct work_struct *work);
143static void plip_timer_bh(struct net_device *dev); 143static void plip_timer_bh(struct work_struct *work);
144 144
145/* Interrupt handler */ 145/* Interrupt handler */
146static void plip_interrupt(int irq, void *dev_id); 146static void plip_interrupt(int irq, void *dev_id);
@@ -207,9 +207,10 @@ struct plip_local {
207 207
208struct net_local { 208struct net_local {
209 struct net_device_stats enet_stats; 209 struct net_device_stats enet_stats;
210 struct net_device *dev;
210 struct work_struct immediate; 211 struct work_struct immediate;
211 struct work_struct deferred; 212 struct delayed_work deferred;
212 struct work_struct timer; 213 struct delayed_work timer;
213 struct plip_local snd_data; 214 struct plip_local snd_data;
214 struct plip_local rcv_data; 215 struct plip_local rcv_data;
215 struct pardevice *pardev; 216 struct pardevice *pardev;
@@ -306,11 +307,11 @@ plip_init_netdev(struct net_device *dev)
306 nl->nibble = PLIP_NIBBLE_WAIT; 307 nl->nibble = PLIP_NIBBLE_WAIT;
307 308
308 /* Initialize task queue structures */ 309 /* Initialize task queue structures */
309 INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev); 310 INIT_WORK(&nl->immediate, plip_bh);
310 INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev); 311 INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
311 312
312 if (dev->irq == -1) 313 if (dev->irq == -1)
313 INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev); 314 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
314 315
315 spin_lock_init(&nl->lock); 316 spin_lock_init(&nl->lock);
316} 317}
@@ -319,9 +320,10 @@ plip_init_netdev(struct net_device *dev)
319 This routine is kicked by do_timer(). 320 This routine is kicked by do_timer().
320 Request `plip_bh' to be invoked. */ 321 Request `plip_bh' to be invoked. */
321static void 322static void
322plip_kick_bh(struct net_device *dev) 323plip_kick_bh(struct work_struct *work)
323{ 324{
324 struct net_local *nl = netdev_priv(dev); 325 struct net_local *nl =
326 container_of(work, struct net_local, deferred.work);
325 327
326 if (nl->is_deferred) 328 if (nl->is_deferred)
327 schedule_work(&nl->immediate); 329 schedule_work(&nl->immediate);
@@ -362,9 +364,9 @@ static const plip_func connection_state_table[] =
362 364
363/* Bottom half handler of PLIP. */ 365/* Bottom half handler of PLIP. */
364static void 366static void
365plip_bh(struct net_device *dev) 367plip_bh(struct work_struct *work)
366{ 368{
367 struct net_local *nl = netdev_priv(dev); 369 struct net_local *nl = container_of(work, struct net_local, immediate);
368 struct plip_local *snd = &nl->snd_data; 370 struct plip_local *snd = &nl->snd_data;
369 struct plip_local *rcv = &nl->rcv_data; 371 struct plip_local *rcv = &nl->rcv_data;
370 plip_func f; 372 plip_func f;
@@ -372,20 +374,21 @@ plip_bh(struct net_device *dev)
372 374
373 nl->is_deferred = 0; 375 nl->is_deferred = 0;
374 f = connection_state_table[nl->connection]; 376 f = connection_state_table[nl->connection];
375 if ((r = (*f)(dev, nl, snd, rcv)) != OK 377 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
376 && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) { 378 && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
377 nl->is_deferred = 1; 379 nl->is_deferred = 1;
378 schedule_delayed_work(&nl->deferred, 1); 380 schedule_delayed_work(&nl->deferred, 1);
379 } 381 }
380} 382}
381 383
382static void 384static void
383plip_timer_bh(struct net_device *dev) 385plip_timer_bh(struct work_struct *work)
384{ 386{
385 struct net_local *nl = netdev_priv(dev); 387 struct net_local *nl =
388 container_of(work, struct net_local, timer.work);
386 389
387 if (!(atomic_read (&nl->kill_timer))) { 390 if (!(atomic_read (&nl->kill_timer))) {
388 plip_interrupt (-1, dev); 391 plip_interrupt (-1, nl->dev);
389 392
390 schedule_delayed_work(&nl->timer, 1); 393 schedule_delayed_work(&nl->timer, 1);
391 } 394 }
@@ -1284,6 +1287,7 @@ static void plip_attach (struct parport *port)
1284 } 1287 }
1285 1288
1286 nl = netdev_priv(dev); 1289 nl = netdev_priv(dev);
1290 nl->dev = dev;
1287 nl->pardev = parport_register_device(port, name, plip_preempt, 1291 nl->pardev = parport_register_device(port, name, plip_preempt,
1288 plip_wakeup, plip_interrupt, 1292 plip_wakeup, plip_interrupt,
1289 0, dev); 1293 0, dev);
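plip keeps three work items in one net_local, an immediate work_struct plus two delayed ones, and each handler names its own member in container_of (immediate, deferred.work, timer.work); the conversion is therefore per member, not per structure, and the handlers reach the net_device through the new nl->dev back-pointer. A sketch with two items sharing one invented private struct:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_local {
	int is_deferred;
	struct work_struct immediate;
	struct delayed_work deferred;
};

static void my_immediate_bh(struct work_struct *work)
{
	struct my_local *nl = container_of(work, struct my_local, immediate);

	nl->is_deferred = 0;		/* protocol state machine would run here */
}

static void my_deferred_bh(struct work_struct *work)
{
	struct my_local *nl =
		container_of(work, struct my_local, deferred.work);

	if (nl->is_deferred)
		schedule_work(&nl->immediate);
}

static void my_local_init(struct my_local *nl)
{
	INIT_WORK(&nl->immediate, my_immediate_bh);
	INIT_DELAYED_WORK(&nl->deferred, my_deferred_bh);
}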
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index ec640f6229ae..d79d141a601d 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2008,7 +2008,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2008 "%s: Another function issued a reset to the " 2008 "%s: Another function issued a reset to the "
2009 "chip. ISR value = %x.\n", ndev->name, value); 2009 "chip. ISR value = %x.\n", ndev->name, value);
2010 } 2010 }
2011 queue_work(qdev->workqueue, &qdev->reset_work); 2011 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2012 spin_unlock(&qdev->adapter_lock); 2012 spin_unlock(&qdev->adapter_lock);
2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2014 ql_disable_interrupts(qdev); 2014 ql_disable_interrupts(qdev);
@@ -3182,11 +3182,13 @@ static void ql3xxx_tx_timeout(struct net_device *ndev)
3182 /* 3182 /*
3183 * Wake up the worker to process this event. 3183 * Wake up the worker to process this event.
3184 */ 3184 */
3185 queue_work(qdev->workqueue, &qdev->tx_timeout_work); 3185 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3186} 3186}
3187 3187
3188static void ql_reset_work(struct ql3_adapter *qdev) 3188static void ql_reset_work(struct work_struct *work)
3189{ 3189{
3190 struct ql3_adapter *qdev =
3191 container_of(work, struct ql3_adapter, reset_work.work);
3190 struct net_device *ndev = qdev->ndev; 3192 struct net_device *ndev = qdev->ndev;
3191 u32 value; 3193 u32 value;
3192 struct ql_tx_buf_cb *tx_cb; 3194 struct ql_tx_buf_cb *tx_cb;
@@ -3278,9 +3280,12 @@ static void ql_reset_work(struct ql3_adapter *qdev)
3278 } 3280 }
3279} 3281}
3280 3282
3281static void ql_tx_timeout_work(struct ql3_adapter *qdev) 3283static void ql_tx_timeout_work(struct work_struct *work)
3282{ 3284{
3283 ql_cycle_adapter(qdev,QL_DO_RESET); 3285 struct ql3_adapter *qdev =
3286 container_of(work, struct ql3_adapter, tx_timeout_work.work);
3287
3288 ql_cycle_adapter(qdev, QL_DO_RESET);
3284} 3289}
3285 3290
3286static void ql_get_board_info(struct ql3_adapter *qdev) 3291static void ql_get_board_info(struct ql3_adapter *qdev)
@@ -3459,9 +3464,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3459 netif_stop_queue(ndev); 3464 netif_stop_queue(ndev);
3460 3465
3461 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3466 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3462 INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev); 3467 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3463 INIT_WORK(&qdev->tx_timeout_work, 3468 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3464 (void (*)(void *))ql_tx_timeout_work, qdev);
3465 3469
3466 init_timer(&qdev->adapter_timer); 3470 init_timer(&qdev->adapter_timer);
3467 qdev->adapter_timer.function = ql3xxx_timer; 3471 qdev->adapter_timer.function = ql3xxx_timer;
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 65da2c0bfda6..ea94de7fd071 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1186,8 +1186,8 @@ struct ql3_adapter {
1186 u32 numPorts; 1186 u32 numPorts;
1187 struct net_device_stats stats; 1187 struct net_device_stats stats;
1188 struct workqueue_struct *workqueue; 1188 struct workqueue_struct *workqueue;
1189 struct work_struct reset_work; 1189 struct delayed_work reset_work;
1190 struct work_struct tx_timeout_work; 1190 struct delayed_work tx_timeout_work;
1191 u32 max_frame_size; 1191 u32 max_frame_size;
1192}; 1192};
1193 1193
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 45d3ca431957..85a392fab5cc 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -424,6 +424,7 @@ struct ring_info {
424struct rtl8169_private { 424struct rtl8169_private {
425 void __iomem *mmio_addr; /* memory map physical address */ 425 void __iomem *mmio_addr; /* memory map physical address */
426 struct pci_dev *pci_dev; /* Index of PCI device */ 426 struct pci_dev *pci_dev; /* Index of PCI device */
427 struct net_device *dev;
427 struct net_device_stats stats; /* statistics of net device */ 428 struct net_device_stats stats; /* statistics of net device */
428 spinlock_t lock; /* spin lock flag */ 429 spinlock_t lock; /* spin lock flag */
429 u32 msg_enable; 430 u32 msg_enable;
@@ -455,7 +456,7 @@ struct rtl8169_private {
455 void (*phy_reset_enable)(void __iomem *); 456 void (*phy_reset_enable)(void __iomem *);
456 unsigned int (*phy_reset_pending)(void __iomem *); 457 unsigned int (*phy_reset_pending)(void __iomem *);
457 unsigned int (*link_ok)(void __iomem *); 458 unsigned int (*link_ok)(void __iomem *);
458 struct work_struct task; 459 struct delayed_work task;
459 unsigned wol_enabled : 1; 460 unsigned wol_enabled : 1;
460}; 461};
461 462
@@ -1510,6 +1511,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1510 SET_MODULE_OWNER(dev); 1511 SET_MODULE_OWNER(dev);
1511 SET_NETDEV_DEV(dev, &pdev->dev); 1512 SET_NETDEV_DEV(dev, &pdev->dev);
1512 tp = netdev_priv(dev); 1513 tp = netdev_priv(dev);
1514 tp->dev = dev;
1513 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 1515 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1514 1516
1515 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 1517 /* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1782,7 +1784,7 @@ static int rtl8169_open(struct net_device *dev)
1782 if (retval < 0) 1784 if (retval < 0)
1783 goto err_free_rx; 1785 goto err_free_rx;
1784 1786
1785 INIT_WORK(&tp->task, NULL, dev); 1787 INIT_DELAYED_WORK(&tp->task, NULL);
1786 1788
1787 rtl8169_hw_start(dev); 1789 rtl8169_hw_start(dev);
1788 1790
@@ -2105,11 +2107,11 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
2105 tp->cur_tx = tp->dirty_tx = 0; 2107 tp->cur_tx = tp->dirty_tx = 0;
2106} 2108}
2107 2109
2108static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *)) 2110static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
2109{ 2111{
2110 struct rtl8169_private *tp = netdev_priv(dev); 2112 struct rtl8169_private *tp = netdev_priv(dev);
2111 2113
2112 PREPARE_WORK(&tp->task, task, dev); 2114 PREPARE_DELAYED_WORK(&tp->task, task);
2113 schedule_delayed_work(&tp->task, 4); 2115 schedule_delayed_work(&tp->task, 4);
2114} 2116}
2115 2117
@@ -2128,9 +2130,11 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
2128 netif_poll_enable(dev); 2130 netif_poll_enable(dev);
2129} 2131}
2130 2132
2131static void rtl8169_reinit_task(void *_data) 2133static void rtl8169_reinit_task(struct work_struct *work)
2132{ 2134{
2133 struct net_device *dev = _data; 2135 struct rtl8169_private *tp =
2136 container_of(work, struct rtl8169_private, task.work);
2137 struct net_device *dev = tp->dev;
2134 int ret; 2138 int ret;
2135 2139
2136 if (netif_running(dev)) { 2140 if (netif_running(dev)) {
@@ -2153,10 +2157,11 @@ static void rtl8169_reinit_task(void *_data)
2153 } 2157 }
2154} 2158}
2155 2159
2156static void rtl8169_reset_task(void *_data) 2160static void rtl8169_reset_task(struct work_struct *work)
2157{ 2161{
2158 struct net_device *dev = _data; 2162 struct rtl8169_private *tp =
2159 struct rtl8169_private *tp = netdev_priv(dev); 2163 container_of(work, struct rtl8169_private, task.work);
2164 struct net_device *dev = tp->dev;
2160 2165
2161 if (!netif_running(dev)) 2166 if (!netif_running(dev))
2162 return; 2167 return;
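
For the scheduling side that r8169 exercises above, a short sketch with illustrative names (struct my_priv, my_reset_task, my_schedule_work), using PREPARE_DELAYED_WORK as it existed at the time of this series:

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct my_priv {
            struct net_device *dev;         /* back-pointer set at probe time */
            struct delayed_work task;
    };

    static void my_reset_task(struct work_struct *work)
    {
            /* Recover the private struct through the embedded .work member. */
            struct my_priv *tp = container_of(work, struct my_priv, task.work);
            struct net_device *dev = tp->dev;

            if (!netif_running(dev))
                    return;
            /* ... reinitialise the hardware ... */
    }

    /* Swap in a handler and (re)arm the work, as rtl8169_schedule_work() does. */
    static void my_schedule_work(struct my_priv *tp, work_func_t task)
    {
            PREPARE_DELAYED_WORK(&tp->task, task);
            schedule_delayed_work(&tp->task, 4);    /* delay in jiffies */
    }
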
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 33569ec9dbfc..250cdbeefdfd 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5872,9 +5872,9 @@ static void s2io_tasklet(unsigned long dev_addr)
5872 * Description: Sets the link status for the adapter 5872 * Description: Sets the link status for the adapter
5873 */ 5873 */
5874 5874
5875static void s2io_set_link(unsigned long data) 5875static void s2io_set_link(struct work_struct *work)
5876{ 5876{
5877 nic_t *nic = (nic_t *) data; 5877 nic_t *nic = container_of(work, nic_t, set_link_task);
5878 struct net_device *dev = nic->dev; 5878 struct net_device *dev = nic->dev;
5879 XENA_dev_config_t __iomem *bar0 = nic->bar0; 5879 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5880 register u64 val64; 5880 register u64 val64;
@@ -6379,10 +6379,10 @@ static int s2io_card_up(nic_t * sp)
6379 * spin lock. 6379 * spin lock.
6380 */ 6380 */
6381 6381
6382static void s2io_restart_nic(unsigned long data) 6382static void s2io_restart_nic(struct work_struct *work)
6383{ 6383{
6384 struct net_device *dev = (struct net_device *) data; 6384 nic_t *sp = container_of(work, nic_t, rst_timer_task);
6385 nic_t *sp = dev->priv; 6385 struct net_device *dev = sp->dev;
6386 6386
6387 s2io_card_down(sp); 6387 s2io_card_down(sp);
6388 if (s2io_card_up(sp)) { 6388 if (s2io_card_up(sp)) {
@@ -6992,10 +6992,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6992 6992
6993 dev->tx_timeout = &s2io_tx_watchdog; 6993 dev->tx_timeout = &s2io_tx_watchdog;
6994 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 6994 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
6995 INIT_WORK(&sp->rst_timer_task, 6995 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
6996 (void (*)(void *)) s2io_restart_nic, dev); 6996 INIT_WORK(&sp->set_link_task, s2io_set_link);
6997 INIT_WORK(&sp->set_link_task,
6998 (void (*)(void *)) s2io_set_link, sp);
6999 6997
7000 pci_save_state(sp->pdev); 6998 pci_save_state(sp->pdev);
7001 6999
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 12b719f4d00f..3b0bafd273c8 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1000,7 +1000,7 @@ s2io_msix_fifo_handle(int irq, void *dev_id);
1000static irqreturn_t s2io_isr(int irq, void *dev_id); 1000static irqreturn_t s2io_isr(int irq, void *dev_id);
1001static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); 1001static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
1002static const struct ethtool_ops netdev_ethtool_ops; 1002static const struct ethtool_ops netdev_ethtool_ops;
1003static void s2io_set_link(unsigned long data); 1003static void s2io_set_link(struct work_struct *work);
1004static int s2io_set_swapper(nic_t * sp); 1004static int s2io_set_swapper(nic_t * sp);
1005static void s2io_card_down(nic_t *nic); 1005static void s2io_card_down(nic_t *nic);
1006static int s2io_card_up(nic_t *nic); 1006static int s2io_card_up(nic_t *nic);
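
The s2io conversion above is the plain (non-delayed) form of the pattern: the handler's void *data argument disappears and the context is recovered from the work_struct itself. A minimal sketch with hypothetical names (struct my_nic, my_restart_nic):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_nic {
            struct work_struct rst_timer_task;
            /* ... */
    };

    /*
     * Old style (removed above): void handler(void *data) installed with
     * INIT_WORK(&sp->rst_timer_task, handler, sp).  New style: the handler
     * receives the work_struct and derives its context from it.
     */
    static void my_restart_nic(struct work_struct *work)
    {
            struct my_nic *sp = container_of(work, struct my_nic, rst_timer_task);

            /* ... bring the card down and back up using sp ... */
    }

    static void my_setup(struct my_nic *sp)
    {
            INIT_WORK(&sp->rst_timer_task, my_restart_nic);
            schedule_work(&sp->rst_timer_task);
    }
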
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index aaba458584fb..b70ed79d4121 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -280,6 +280,7 @@ enum sis190_feature {
280struct sis190_private { 280struct sis190_private {
281 void __iomem *mmio_addr; 281 void __iomem *mmio_addr;
282 struct pci_dev *pci_dev; 282 struct pci_dev *pci_dev;
283 struct net_device *dev;
283 struct net_device_stats stats; 284 struct net_device_stats stats;
284 spinlock_t lock; 285 spinlock_t lock;
285 u32 rx_buf_sz; 286 u32 rx_buf_sz;
@@ -897,10 +898,11 @@ static void sis190_hw_start(struct net_device *dev)
897 netif_start_queue(dev); 898 netif_start_queue(dev);
898} 899}
899 900
900static void sis190_phy_task(void * data) 901static void sis190_phy_task(struct work_struct *work)
901{ 902{
902 struct net_device *dev = data; 903 struct sis190_private *tp =
903 struct sis190_private *tp = netdev_priv(dev); 904 container_of(work, struct sis190_private, phy_task);
905 struct net_device *dev = tp->dev;
904 void __iomem *ioaddr = tp->mmio_addr; 906 void __iomem *ioaddr = tp->mmio_addr;
905 int phy_id = tp->mii_if.phy_id; 907 int phy_id = tp->mii_if.phy_id;
906 u16 val; 908 u16 val;
@@ -1047,7 +1049,7 @@ static int sis190_open(struct net_device *dev)
1047 if (rc < 0) 1049 if (rc < 0)
1048 goto err_free_rx_1; 1050 goto err_free_rx_1;
1049 1051
1050 INIT_WORK(&tp->phy_task, sis190_phy_task, dev); 1052 INIT_WORK(&tp->phy_task, sis190_phy_task);
1051 1053
1052 sis190_request_timer(dev); 1054 sis190_request_timer(dev);
1053 1055
@@ -1436,6 +1438,7 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1436 SET_NETDEV_DEV(dev, &pdev->dev); 1438 SET_NETDEV_DEV(dev, &pdev->dev);
1437 1439
1438 tp = netdev_priv(dev); 1440 tp = netdev_priv(dev);
1441 tp->dev = dev;
1439 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); 1442 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1440 1443
1441 rc = pci_enable_device(pdev); 1444 rc = pci_enable_device(pdev);
@@ -1798,7 +1801,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1798 1801
1799 sis190_init_rxfilter(dev); 1802 sis190_init_rxfilter(dev);
1800 1803
1801 INIT_WORK(&tp->phy_task, sis190_phy_task, dev); 1804 INIT_WORK(&tp->phy_task, sis190_phy_task);
1802 1805
1803 dev->open = sis190_open; 1806 dev->open = sis190_open;
1804 dev->stop = sis190_close; 1807 dev->stop = sis190_close;
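
Several of the drivers above (r8169, sis190, later tlan and tulip) used to pass the net_device as the work callback's data argument; with that argument gone they grow a dev back-pointer in the private struct, filled in once at probe time. A sketch of that idiom with hypothetical names (struct my_priv, my_phy_task, my_init_board):

    #include <linux/etherdevice.h>
    #include <linux/workqueue.h>

    struct my_priv {
            struct net_device *dev;         /* back-pointer, filled in at probe */
            struct work_struct phy_task;
    };

    static void my_phy_task(struct work_struct *work)
    {
            struct my_priv *tp = container_of(work, struct my_priv, phy_task);
            struct net_device *dev = tp->dev;   /* what used to arrive as 'data' */

            /* ... poll the PHY via dev/tp ... */
    }

    static struct net_device *my_init_board(void)
    {
            struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
            struct my_priv *tp;

            if (!dev)
                    return NULL;
            tp = netdev_priv(dev);
            tp->dev = dev;                      /* the one-line addition */
            INIT_WORK(&tp->phy_task, my_phy_task);
            return dev;
    }
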
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5513907e8393..b60f0451f6cd 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1327,10 +1327,11 @@ static void xm_check_link(struct net_device *dev)
1327 * Since internal PHY is wired to a level triggered pin, can't 1327 * Since internal PHY is wired to a level triggered pin, can't
1328 * get an interrupt when carrier is detected. 1328 * get an interrupt when carrier is detected.
1329 */ 1329 */
1330static void xm_link_timer(void *arg) 1330static void xm_link_timer(struct work_struct *work)
1331{ 1331{
1332 struct net_device *dev = arg; 1332 struct skge_port *skge =
1333 struct skge_port *skge = netdev_priv(arg); 1333 container_of(work, struct skge_port, link_thread.work);
1334 struct net_device *dev = skge->netdev;
1334 struct skge_hw *hw = skge->hw; 1335 struct skge_hw *hw = skge->hw;
1335 int port = skge->port; 1336 int port = skge->port;
1336 1337
@@ -3072,9 +3073,9 @@ static void skge_error_irq(struct skge_hw *hw)
3072 * because accessing phy registers requires spin wait which might 3073 * because accessing phy registers requires spin wait which might
3073 * cause excess interrupt latency. 3074 * cause excess interrupt latency.
3074 */ 3075 */
3075static void skge_extirq(void *arg) 3076static void skge_extirq(struct work_struct *work)
3076{ 3077{
3077 struct skge_hw *hw = arg; 3078 struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
3078 int port; 3079 int port;
3079 3080
3080 mutex_lock(&hw->phy_mutex); 3081 mutex_lock(&hw->phy_mutex);
@@ -3456,7 +3457,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3456 skge->port = port; 3457 skge->port = port;
3457 3458
3458 /* Only used for Genesis XMAC */ 3459 /* Only used for Genesis XMAC */
3459 INIT_WORK(&skge->link_thread, xm_link_timer, dev); 3460 INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
3460 3461
3461 if (hw->chip_id != CHIP_ID_GENESIS) { 3462 if (hw->chip_id != CHIP_ID_GENESIS) {
3462 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3463 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3543,7 +3544,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3543 3544
3544 hw->pdev = pdev; 3545 hw->pdev = pdev;
3545 mutex_init(&hw->phy_mutex); 3546 mutex_init(&hw->phy_mutex);
3546 INIT_WORK(&hw->phy_work, skge_extirq, hw); 3547 INIT_WORK(&hw->phy_work, skge_extirq);
3547 spin_lock_init(&hw->hw_lock); 3548 spin_lock_init(&hw->hw_lock);
3548 3549
3549 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3550 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 537c0aaa1db8..23e5275d920c 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2456,7 +2456,7 @@ struct skge_port {
2456 2456
2457 struct net_device_stats net_stats; 2457 struct net_device_stats net_stats;
2458 2458
2459 struct work_struct link_thread; 2459 struct delayed_work link_thread;
2460 enum pause_control flow_control; 2460 enum pause_control flow_control;
2461 enum pause_status flow_status; 2461 enum pause_status flow_status;
2462 u8 rx_csum; 2462 u8 rx_csum;
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index cef7e6671c49..f16f696c1ff2 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1937,10 +1937,11 @@ spider_net_stop(struct net_device *netdev)
1937 * called as task when tx hangs, resets interface (if interface is up) 1937 * called as task when tx hangs, resets interface (if interface is up)
1938 */ 1938 */
1939static void 1939static void
1940spider_net_tx_timeout_task(void *data) 1940spider_net_tx_timeout_task(struct work_struct *work)
1941{ 1941{
1942 struct net_device *netdev = data; 1942 struct spider_net_card *card =
1943 struct spider_net_card *card = netdev_priv(netdev); 1943 container_of(work, struct spider_net_card, tx_timeout_task);
1944 struct net_device *netdev = card->netdev;
1944 1945
1945 if (!(netdev->flags & IFF_UP)) 1946 if (!(netdev->flags & IFF_UP))
1946 goto out; 1947 goto out;
@@ -2114,7 +2115,7 @@ spider_net_alloc_card(void)
2114 card = netdev_priv(netdev); 2115 card = netdev_priv(netdev);
2115 card->netdev = netdev; 2116 card->netdev = netdev;
2116 card->msg_enable = SPIDER_NET_DEFAULT_MSG; 2117 card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2117 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev); 2118 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
2118 init_waitqueue_head(&card->waitq); 2119 init_waitqueue_head(&card->waitq);
2119 atomic_set(&card->tx_timeout_task_counter, 0); 2120 atomic_set(&card->tx_timeout_task_counter, 0);
2120 2121
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 334c6cfd6595..d03a9a849c06 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2281,9 +2281,9 @@ static void gem_do_stop(struct net_device *dev, int wol)
2281 } 2281 }
2282} 2282}
2283 2283
2284static void gem_reset_task(void *data) 2284static void gem_reset_task(struct work_struct *work)
2285{ 2285{
2286 struct gem *gp = (struct gem *) data; 2286 struct gem *gp = container_of(work, struct gem, reset_task);
2287 2287
2288 mutex_lock(&gp->pm_mutex); 2288 mutex_lock(&gp->pm_mutex);
2289 2289
@@ -3043,7 +3043,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3043 gp->link_timer.function = gem_link_timer; 3043 gp->link_timer.function = gem_link_timer;
3044 gp->link_timer.data = (unsigned long) gp; 3044 gp->link_timer.data = (unsigned long) gp;
3045 3045
3046 INIT_WORK(&gp->reset_task, gem_reset_task, gp); 3046 INIT_WORK(&gp->reset_task, gem_reset_task);
3047 3047
3048 gp->lstate = link_down; 3048 gp->lstate = link_down;
3049 gp->timer_ticks = 0; 3049 gp->timer_ticks = 0;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index c20bb998e0e5..d9123c9adc1e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3654,9 +3654,9 @@ static void tg3_poll_controller(struct net_device *dev)
3654} 3654}
3655#endif 3655#endif
3656 3656
3657static void tg3_reset_task(void *_data) 3657static void tg3_reset_task(struct work_struct *work)
3658{ 3658{
3659 struct tg3 *tp = _data; 3659 struct tg3 *tp = container_of(work, struct tg3, reset_task);
3660 unsigned int restart_timer; 3660 unsigned int restart_timer;
3661 3661
3662 tg3_full_lock(tp, 0); 3662 tg3_full_lock(tp, 0);
@@ -11734,7 +11734,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11734#endif 11734#endif
11735 spin_lock_init(&tp->lock); 11735 spin_lock_init(&tp->lock);
11736 spin_lock_init(&tp->indirect_lock); 11736 spin_lock_init(&tp->indirect_lock);
11737 INIT_WORK(&tp->reset_task, tg3_reset_task, tp); 11737 INIT_WORK(&tp->reset_task, tg3_reset_task);
11738 11738
11739 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); 11739 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11740 if (tp->regs == 0UL) { 11740 if (tp->regs == 0UL) {
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index e14f5a00f65a..f85f00251123 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -296,6 +296,7 @@ static void TLan_SetMulticastList( struct net_device *);
296static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 296static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
297static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); 297static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
298static void TLan_tx_timeout( struct net_device *dev); 298static void TLan_tx_timeout( struct net_device *dev);
299static void TLan_tx_timeout_work(struct work_struct *work);
299static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 300static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
300 301
301static u32 TLan_HandleInvalid( struct net_device *, u16 ); 302static u32 TLan_HandleInvalid( struct net_device *, u16 );
@@ -562,6 +563,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
562 priv = netdev_priv(dev); 563 priv = netdev_priv(dev);
563 564
564 priv->pciDev = pdev; 565 priv->pciDev = pdev;
566 priv->dev = dev;
565 567
566 /* Is this a PCI device? */ 568 /* Is this a PCI device? */
567 if (pdev) { 569 if (pdev) {
@@ -634,7 +636,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
634 636
635 /* This will be used when we get an adapter error from 637 /* This will be used when we get an adapter error from
636 * within our irq handler */ 638 * within our irq handler */
637 INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev); 639 INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
638 640
639 spin_lock_init(&priv->lock); 641 spin_lock_init(&priv->lock);
640 642
@@ -1040,6 +1042,25 @@ static void TLan_tx_timeout(struct net_device *dev)
1040} 1042}
1041 1043
1042 1044
1045 /***************************************************************
1046 * TLan_tx_timeout_work
1047 *
1048 * Returns: nothing
1049 *
1050 * Params:
1051 * work work item of device which timed out
1052 *
1053 **************************************************************/
1054
1055static void TLan_tx_timeout_work(struct work_struct *work)
1056{
1057 TLanPrivateInfo *priv =
1058 container_of(work, TLanPrivateInfo, tlan_tqueue);
1059
1060 TLan_tx_timeout(priv->dev);
1061}
1062
1063
1043 1064
1044 /*************************************************************** 1065 /***************************************************************
1045 * TLan_StartTx 1066 * TLan_StartTx
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index a44e2f2ef62a..41ce0b665937 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -170,6 +170,7 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
170typedef struct tlan_private_tag { 170typedef struct tlan_private_tag {
171 struct net_device *nextDevice; 171 struct net_device *nextDevice;
172 struct pci_dev *pciDev; 172 struct pci_dev *pciDev;
173 struct net_device *dev;
173 void *dmaStorage; 174 void *dmaStorage;
174 dma_addr_t dmaStorageDMA; 175 dma_addr_t dmaStorageDMA;
175 unsigned int dmaSize; 176 unsigned int dmaSize;
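
tlan takes a slightly different route: TLan_tx_timeout() keeps its struct net_device * signature (it is also wired up as dev->tx_timeout), and a thin wrapper with the new work prototype is added in front of it. A sketch of that wrapper idiom with hypothetical names (struct my_priv, my_tx_timeout, my_tx_timeout_work):

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct my_priv {
            struct net_device *dev;
            struct work_struct tqueue;
    };

    /* Existing handler, still usable directly as dev->tx_timeout. */
    static void my_tx_timeout(struct net_device *dev)
    {
            /* ... reset the adapter ... */
    }

    /* Thin adapter so the same logic can also run as a work item. */
    static void my_tx_timeout_work(struct work_struct *work)
    {
            struct my_priv *priv = container_of(work, struct my_priv, tqueue);

            my_tx_timeout(priv->dev);
    }
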
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index fa3a2bb105ad..942b839ccc5b 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -26,10 +26,11 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
26 26
27/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list 27/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
28 of available transceivers. */ 28 of available transceivers. */
29void t21142_media_task(void *data) 29void t21142_media_task(struct work_struct *work)
30{ 30{
31 struct net_device *dev = data; 31 struct tulip_private *tp =
32 struct tulip_private *tp = netdev_priv(dev); 32 container_of(work, struct tulip_private, media_work);
33 struct net_device *dev = tp->dev;
33 void __iomem *ioaddr = tp->base_addr; 34 void __iomem *ioaddr = tp->base_addr;
34 int csr12 = ioread32(ioaddr + CSR12); 35 int csr12 = ioread32(ioaddr + CSR12);
35 int next_tick = 60*HZ; 36 int next_tick = 60*HZ;
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 066e5d6bcbd8..df326fe1cc8f 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -18,10 +18,11 @@
18#include "tulip.h" 18#include "tulip.h"
19 19
20 20
21void tulip_media_task(void *data) 21void tulip_media_task(struct work_struct *work)
22{ 22{
23 struct net_device *dev = data; 23 struct tulip_private *tp =
24 struct tulip_private *tp = netdev_priv(dev); 24 container_of(work, struct tulip_private, media_work);
25 struct net_device *dev = tp->dev;
25 void __iomem *ioaddr = tp->base_addr; 26 void __iomem *ioaddr = tp->base_addr;
26 u32 csr12 = ioread32(ioaddr + CSR12); 27 u32 csr12 = ioread32(ioaddr + CSR12);
27 int next_tick = 2*HZ; 28 int next_tick = 2*HZ;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index ad107f45c7b1..25f25da76917 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -44,7 +44,7 @@ struct tulip_chip_table {
44 int valid_intrs; /* CSR7 interrupt enable settings */ 44 int valid_intrs; /* CSR7 interrupt enable settings */
45 int flags; 45 int flags;
46 void (*media_timer) (unsigned long); 46 void (*media_timer) (unsigned long);
47 void (*media_task) (void *); 47 work_func_t media_task;
48}; 48};
49 49
50 50
@@ -392,6 +392,7 @@ struct tulip_private {
392 int csr12_shadow; 392 int csr12_shadow;
393 int pad0; /* Used for 8-byte alignment */ 393 int pad0; /* Used for 8-byte alignment */
394 struct work_struct media_work; 394 struct work_struct media_work;
395 struct net_device *dev;
395}; 396};
396 397
397 398
@@ -406,7 +407,7 @@ struct eeprom_fixup {
406 407
407/* 21142.c */ 408/* 21142.c */
408extern u16 t21142_csr14[]; 409extern u16 t21142_csr14[];
409void t21142_media_task(void *data); 410void t21142_media_task(struct work_struct *work);
410void t21142_start_nway(struct net_device *dev); 411void t21142_start_nway(struct net_device *dev);
411void t21142_lnk_change(struct net_device *dev, int csr5); 412void t21142_lnk_change(struct net_device *dev, int csr5);
412 413
@@ -444,7 +445,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5);
444void pnic_timer(unsigned long data); 445void pnic_timer(unsigned long data);
445 446
446/* timer.c */ 447/* timer.c */
447void tulip_media_task(void *data); 448void tulip_media_task(struct work_struct *work);
448void mxic_timer(unsigned long data); 449void mxic_timer(unsigned long data);
449void comet_timer(unsigned long data); 450void comet_timer(unsigned long data);
450 451
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0aee618f883c..5a35354aa523 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1367,6 +1367,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1367 * it is zeroed and aligned in alloc_etherdev 1367 * it is zeroed and aligned in alloc_etherdev
1368 */ 1368 */
1369 tp = netdev_priv(dev); 1369 tp = netdev_priv(dev);
1370 tp->dev = dev;
1370 1371
1371 tp->rx_ring = pci_alloc_consistent(pdev, 1372 tp->rx_ring = pci_alloc_consistent(pdev,
1372 sizeof(struct tulip_rx_desc) * RX_RING_SIZE + 1373 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
@@ -1389,7 +1390,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1389 tp->timer.data = (unsigned long)dev; 1390 tp->timer.data = (unsigned long)dev;
1390 tp->timer.function = tulip_tbl[tp->chip_id].media_timer; 1391 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1391 1392
1392 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev); 1393 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1393 1394
1394 dev->base_addr = (unsigned long)ioaddr; 1395 dev->base_addr = (unsigned long)ioaddr;
1395 1396
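
In tulip the media task is not a fixed function but a per-chip entry in tulip_chip_table, so the field type itself changes to work_func_t and the table entry is handed straight to INIT_WORK. A sketch of that arrangement with hypothetical names (struct my_tulip_priv, struct my_chip_table, my_media_task):

    #include <linux/workqueue.h>

    struct my_tulip_priv {
            struct work_struct media_work;
    };

    struct my_chip_table {
            const char  *chip_name;
            work_func_t media_task;             /* was: void (*)(void *) */
    };

    static void my_media_task(struct work_struct *work)
    {
            /* per-chip media polling would go here */
    }

    static const struct my_chip_table my_tbl[] = {
            { "HYPOTHETICAL-21143", my_media_task },
    };

    static void my_probe_one(struct my_tulip_priv *tp, int chip_id)
    {
            /* The table entry goes straight to INIT_WORK, no cast needed. */
            INIT_WORK(&tp->media_work, my_tbl[chip_id].media_task);
    }
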
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 931cbdf6d791..b2a23aed4428 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -125,8 +125,8 @@ static int cpc_tty_write_room(struct tty_struct *tty);
125static int cpc_tty_chars_in_buffer(struct tty_struct *tty); 125static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
126static void cpc_tty_flush_buffer(struct tty_struct *tty); 126static void cpc_tty_flush_buffer(struct tty_struct *tty);
127static void cpc_tty_hangup(struct tty_struct *tty); 127static void cpc_tty_hangup(struct tty_struct *tty);
128static void cpc_tty_rx_work(void *data); 128static void cpc_tty_rx_work(struct work_struct *work);
129static void cpc_tty_tx_work(void *data); 129static void cpc_tty_tx_work(struct work_struct *work);
130static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); 130static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
131static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); 131static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
132static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); 132static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
@@ -261,8 +261,8 @@ void cpc_tty_init(pc300dev_t *pc300dev)
261 cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; 261 cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
262 cpc_tty->pc300dev = pc300dev; 262 cpc_tty->pc300dev = pc300dev;
263 263
264 INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty); 264 INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
265 INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port); 265 INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
266 266
267 cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL; 267 cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
268 268
@@ -659,21 +659,23 @@ static void cpc_tty_hangup(struct tty_struct *tty)
659 * o call the line disc. read 659 * o call the line disc. read
660 * o free memory 660 * o free memory
661 */ 661 */
662static void cpc_tty_rx_work(void * data) 662static void cpc_tty_rx_work(struct work_struct *work)
663{ 663{
664 st_cpc_tty_area *cpc_tty;
664 unsigned long port; 665 unsigned long port;
665 int i, j; 666 int i, j;
666 st_cpc_tty_area *cpc_tty;
667 volatile st_cpc_rx_buf *buf; 667 volatile st_cpc_rx_buf *buf;
668 char flags=0,flg_rx=1; 668 char flags=0,flg_rx=1;
669 struct tty_ldisc *ld; 669 struct tty_ldisc *ld;
670 670
671 if (cpc_tty_cnt == 0) return; 671 if (cpc_tty_cnt == 0) return;
672
673 672
674 for (i=0; (i < 4) && flg_rx ; i++) { 673 for (i=0; (i < 4) && flg_rx ; i++) {
675 flg_rx = 0; 674 flg_rx = 0;
676 port = (unsigned long)data; 675
676 cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
677 port = cpc_tty - cpc_tty_area;
678
677 for (j=0; j < CPC_TTY_NPORTS; j++) { 679 for (j=0; j < CPC_TTY_NPORTS; j++) {
678 cpc_tty = &cpc_tty_area[port]; 680 cpc_tty = &cpc_tty_area[port];
679 681
@@ -882,9 +884,10 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
882 * o if need call line discipline wakeup 884 * o if need call line discipline wakeup
883 * o call wake_up_interruptible 885 * o call wake_up_interruptible
884 */ 886 */
885static void cpc_tty_tx_work(void *data) 887static void cpc_tty_tx_work(struct work_struct *work)
886{ 888{
887 st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; 889 st_cpc_tty_area *cpc_tty =
890 container_of(work, st_cpc_tty_area, tty_tx_work);
888 struct tty_struct *tty; 891 struct tty_struct *tty;
889 892
890 CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name); 893 CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
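
pc300_tty's rx work used to receive the port number as its data argument; after the conversion it locates its st_cpc_tty_area with container_of() and, since those areas live in a static array, recovers the index by pointer subtraction. A sketch of that trick with hypothetical names (struct my_port, my_ports, my_rx_work):

    #include <linux/workqueue.h>

    #define MY_NPORTS 4

    struct my_port {
            struct work_struct rx_work;
            /* ... */
    };

    static struct my_port my_ports[MY_NPORTS];

    static void my_rx_work(struct work_struct *work)
    {
            struct my_port *port = container_of(work, struct my_port, rx_work);
            unsigned long idx = port - my_ports;    /* element index, as pc300 does */

            /* ... drain the receive queue of my_ports[idx] ... */
    }
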
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 94dfb92fab5c..8286678513b9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -819,7 +819,7 @@ struct bcm43xx_private {
819 struct tasklet_struct isr_tasklet; 819 struct tasklet_struct isr_tasklet;
820 820
821 /* Periodic tasks */ 821 /* Periodic tasks */
822 struct work_struct periodic_work; 822 struct delayed_work periodic_work;
823 unsigned int periodic_state; 823 unsigned int periodic_state;
824 824
825 struct work_struct restart_work; 825 struct work_struct restart_work;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 5b3c27359a18..2ec2e5afce67 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3215,9 +3215,10 @@ static void do_periodic_work(struct bcm43xx_private *bcm)
3215 schedule_delayed_work(&bcm->periodic_work, HZ * 15); 3215 schedule_delayed_work(&bcm->periodic_work, HZ * 15);
3216} 3216}
3217 3217
3218static void bcm43xx_periodic_work_handler(void *d) 3218static void bcm43xx_periodic_work_handler(struct work_struct *work)
3219{ 3219{
3220 struct bcm43xx_private *bcm = d; 3220 struct bcm43xx_private *bcm =
3221 container_of(work, struct bcm43xx_private, periodic_work.work);
3221 struct net_device *net_dev = bcm->net_dev; 3222 struct net_device *net_dev = bcm->net_dev;
3222 unsigned long flags; 3223 unsigned long flags;
3223 u32 savedirqs = 0; 3224 u32 savedirqs = 0;
@@ -3279,11 +3280,11 @@ void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
3279 3280
3280void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) 3281void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
3281{ 3282{
3282 struct work_struct *work = &(bcm->periodic_work); 3283 struct delayed_work *work = &bcm->periodic_work;
3283 3284
3284 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); 3285 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
3285 INIT_WORK(work, bcm43xx_periodic_work_handler, bcm); 3286 INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
3286 schedule_work(work); 3287 schedule_delayed_work(work, 0);
3287} 3288}
3288 3289
3289static void bcm43xx_security_init(struct bcm43xx_private *bcm) 3290static void bcm43xx_security_init(struct bcm43xx_private *bcm)
@@ -3635,7 +3636,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
3635 bcm43xx_periodic_tasks_setup(bcm); 3636 bcm43xx_periodic_tasks_setup(bcm);
3636 3637
3637 /*FIXME: This should be handled by softmac instead. */ 3638 /*FIXME: This should be handled by softmac instead. */
3638 schedule_work(&bcm->softmac->associnfo.work); 3639 schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
3639 3640
3640out: 3641out:
3641 mutex_unlock(&(bcm)->mutex); 3642 mutex_unlock(&(bcm)->mutex);
@@ -4182,9 +4183,10 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev)
4182/* Hard-reset the chip. Do not call this directly. 4183/* Hard-reset the chip. Do not call this directly.
4183 * Use bcm43xx_controller_restart() 4184 * Use bcm43xx_controller_restart()
4184 */ 4185 */
4185static void bcm43xx_chip_reset(void *_bcm) 4186static void bcm43xx_chip_reset(struct work_struct *work)
4186{ 4187{
4187 struct bcm43xx_private *bcm = _bcm; 4188 struct bcm43xx_private *bcm =
4189 container_of(work, struct bcm43xx_private, restart_work);
4188 struct bcm43xx_phyinfo *phy; 4190 struct bcm43xx_phyinfo *phy;
4189 int err = -ENODEV; 4191 int err = -ENODEV;
4190 4192
@@ -4211,7 +4213,7 @@ void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason)
4211 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) 4213 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
4212 return; 4214 return;
4213 printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); 4215 printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
4214 INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm); 4216 INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
4215 schedule_work(&bcm->restart_work); 4217 schedule_work(&bcm->restart_work);
4216} 4218}
4217 4219
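
Once bcm43xx's periodic_work becomes a delayed_work it can no longer be handed to schedule_work(); "run it now" is expressed as a zero delay instead, and the handler goes through the embedded .work member. A sketch with hypothetical names (struct my_wl, my_periodic_work, my_periodic_setup):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct my_wl {
            struct delayed_work periodic_work;
    };

    static void my_periodic_work(struct work_struct *work)
    {
            struct my_wl *wl =
                    container_of(work, struct my_wl, periodic_work.work);

            /* ... periodic housekeeping ... */
            schedule_delayed_work(&wl->periodic_work, HZ * 15);     /* re-arm */
    }

    static void my_periodic_setup(struct my_wl *wl)
    {
            INIT_DELAYED_WORK(&wl->periodic_work, my_periodic_work);
            schedule_delayed_work(&wl->periodic_work, 0);   /* run immediately */
    }
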
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index e663518bd570..e89c890d16fd 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -35,7 +35,7 @@ int hostap_80211_get_hdrlen(u16 fc);
35struct net_device_stats *hostap_get_stats(struct net_device *dev); 35struct net_device_stats *hostap_get_stats(struct net_device *dev);
36void hostap_setup_dev(struct net_device *dev, local_info_t *local, 36void hostap_setup_dev(struct net_device *dev, local_info_t *local,
37 int main_dev); 37 int main_dev);
38void hostap_set_multicast_list_queue(void *data); 38void hostap_set_multicast_list_queue(struct work_struct *work);
39int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); 39int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
40int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); 40int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
41void hostap_cleanup(local_info_t *local); 41void hostap_cleanup(local_info_t *local);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index ba13125024cb..08bc57a4b895 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -49,10 +49,10 @@ MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs "
49static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); 49static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
50static void hostap_event_expired_sta(struct net_device *dev, 50static void hostap_event_expired_sta(struct net_device *dev,
51 struct sta_info *sta); 51 struct sta_info *sta);
52static void handle_add_proc_queue(void *data); 52static void handle_add_proc_queue(struct work_struct *work);
53 53
54#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 54#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
55static void handle_wds_oper_queue(void *data); 55static void handle_wds_oper_queue(struct work_struct *work);
56static void prism2_send_mgmt(struct net_device *dev, 56static void prism2_send_mgmt(struct net_device *dev,
57 u16 type_subtype, char *body, 57 u16 type_subtype, char *body,
58 int body_len, u8 *addr, u16 tx_cb_idx); 58 int body_len, u8 *addr, u16 tx_cb_idx);
@@ -807,7 +807,7 @@ void hostap_init_data(local_info_t *local)
807 INIT_LIST_HEAD(&ap->sta_list); 807 INIT_LIST_HEAD(&ap->sta_list);
808 808
809 /* Initialize task queue structure for AP management */ 809 /* Initialize task queue structure for AP management */
810 INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap); 810 INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
811 811
812 ap->tx_callback_idx = 812 ap->tx_callback_idx =
813 hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); 813 hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
@@ -815,7 +815,7 @@ void hostap_init_data(local_info_t *local)
815 printk(KERN_WARNING "%s: failed to register TX callback for " 815 printk(KERN_WARNING "%s: failed to register TX callback for "
816 "AP\n", local->dev->name); 816 "AP\n", local->dev->name);
817#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 817#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
818 INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local); 818 INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
819 819
820 ap->tx_callback_auth = 820 ap->tx_callback_auth =
821 hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); 821 hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
@@ -1062,9 +1062,10 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off,
1062} 1062}
1063 1063
1064 1064
1065static void handle_add_proc_queue(void *data) 1065static void handle_add_proc_queue(struct work_struct *work)
1066{ 1066{
1067 struct ap_data *ap = (struct ap_data *) data; 1067 struct ap_data *ap = container_of(work, struct ap_data,
1068 add_sta_proc_queue);
1068 struct sta_info *sta; 1069 struct sta_info *sta;
1069 char name[20]; 1070 char name[20];
1070 struct add_sta_proc_data *entry, *prev; 1071 struct add_sta_proc_data *entry, *prev;
@@ -1952,9 +1953,11 @@ static void handle_pspoll(local_info_t *local,
1952 1953
1953#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 1954#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
1954 1955
1955static void handle_wds_oper_queue(void *data) 1956static void handle_wds_oper_queue(struct work_struct *work)
1956{ 1957{
1957 local_info_t *local = data; 1958 struct ap_data *ap = container_of(work, struct ap_data,
1959 wds_oper_queue);
1960 local_info_t *local = ap->local;
1958 struct wds_oper_data *entry, *prev; 1961 struct wds_oper_data *entry, *prev;
1959 1962
1960 spin_lock_bh(&local->lock); 1963 spin_lock_bh(&local->lock);
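
The hostap WDS hunk above shows that container_of() always returns the structure that physically embeds the work item (struct ap_data in this case), and any wider context must be reached through an explicit pointer such as ap->local. A sketch of that shape with hypothetical names (struct my_ap, struct my_local, my_wds_oper_queue):

    #include <linux/workqueue.h>

    struct my_local;                        /* wider per-device context */

    struct my_ap {
            struct my_local *local;         /* back-pointer to the owner */
            struct work_struct wds_oper_queue;
    };

    struct my_local {
            struct my_ap *ap;
            /* ... */
    };

    static void my_wds_oper_queue(struct work_struct *work)
    {
            /* container_of() yields the embedding struct, here the AP data. */
            struct my_ap *ap = container_of(work, struct my_ap, wds_oper_queue);
            struct my_local *local = ap->local;

            /* ... process queued WDS operations for 'local' ... */
            (void)local;                    /* silence unused warning in this stub */
    }
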
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ed00ebb6e7f4..c19e68636a1c 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1645,9 +1645,9 @@ static void prism2_schedule_reset(local_info_t *local)
1645 1645
1646/* Called only as scheduled task after noticing card timeout in interrupt 1646/* Called only as scheduled task after noticing card timeout in interrupt
1647 * context */ 1647 * context */
1648static void handle_reset_queue(void *data) 1648static void handle_reset_queue(struct work_struct *work)
1649{ 1649{
1650 local_info_t *local = (local_info_t *) data; 1650 local_info_t *local = container_of(work, local_info_t, reset_queue);
1651 1651
1652 printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); 1652 printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
1653 prism2_hw_reset(local->dev); 1653 prism2_hw_reset(local->dev);
@@ -2896,9 +2896,10 @@ static void hostap_passive_scan(unsigned long data)
2896 2896
2897/* Called only as a scheduled task when communications quality values should 2897/* Called only as a scheduled task when communications quality values should
2898 * be updated. */ 2898 * be updated. */
2899static void handle_comms_qual_update(void *data) 2899static void handle_comms_qual_update(struct work_struct *work)
2900{ 2900{
2901 local_info_t *local = data; 2901 local_info_t *local =
2902 container_of(work, local_info_t, comms_qual_update);
2902 prism2_update_comms_qual(local->dev); 2903 prism2_update_comms_qual(local->dev);
2903} 2904}
2904 2905
@@ -3050,9 +3051,9 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set)
3050} 3051}
3051 3052
3052 3053
3053static void handle_set_tim_queue(void *data) 3054static void handle_set_tim_queue(struct work_struct *work)
3054{ 3055{
3055 local_info_t *local = (local_info_t *) data; 3056 local_info_t *local = container_of(work, local_info_t, set_tim_queue);
3056 struct set_tim_data *entry; 3057 struct set_tim_data *entry;
3057 u16 val; 3058 u16 val;
3058 3059
@@ -3209,15 +3210,15 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
3209 local->scan_channel_mask = 0xffff; 3210 local->scan_channel_mask = 0xffff;
3210 3211
3211 /* Initialize task queue structures */ 3212 /* Initialize task queue structures */
3212 INIT_WORK(&local->reset_queue, handle_reset_queue, local); 3213 INIT_WORK(&local->reset_queue, handle_reset_queue);
3213 INIT_WORK(&local->set_multicast_list_queue, 3214 INIT_WORK(&local->set_multicast_list_queue,
3214 hostap_set_multicast_list_queue, local->dev); 3215 hostap_set_multicast_list_queue);
3215 3216
3216 INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local); 3217 INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
3217 INIT_LIST_HEAD(&local->set_tim_list); 3218 INIT_LIST_HEAD(&local->set_tim_list);
3218 spin_lock_init(&local->set_tim_lock); 3219 spin_lock_init(&local->set_tim_lock);
3219 3220
3220 INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local); 3221 INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
3221 3222
3222 /* Initialize tasklets for handling hardware IRQ related operations 3223 /* Initialize tasklets for handling hardware IRQ related operations
3223 * outside hw IRQ handler */ 3224 * outside hw IRQ handler */
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 50f72d831cf4..5fd2b1ad7f5e 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -474,9 +474,9 @@ static void handle_info_queue_scanresults(local_info_t *local)
474 474
475/* Called only as scheduled task after receiving info frames (used to avoid 475/* Called only as scheduled task after receiving info frames (used to avoid
476 * pending too much time in HW IRQ handler). */ 476 * pending too much time in HW IRQ handler). */
477static void handle_info_queue(void *data) 477static void handle_info_queue(struct work_struct *work)
478{ 478{
479 local_info_t *local = (local_info_t *) data; 479 local_info_t *local = container_of(work, local_info_t, info_queue);
480 480
481 if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, 481 if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
482 &local->pending_info)) 482 &local->pending_info))
@@ -493,7 +493,7 @@ void hostap_info_init(local_info_t *local)
493{ 493{
494 skb_queue_head_init(&local->info_list); 494 skb_queue_head_init(&local->info_list);
495#ifndef PRISM2_NO_STATION_MODES 495#ifndef PRISM2_NO_STATION_MODES
496 INIT_WORK(&local->info_queue, handle_info_queue, local); 496 INIT_WORK(&local->info_queue, handle_info_queue);
497#endif /* PRISM2_NO_STATION_MODES */ 497#endif /* PRISM2_NO_STATION_MODES */
498} 498}
499 499
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 53374fcba77e..0796be9d9e77 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -767,14 +767,14 @@ static int prism2_set_mac_address(struct net_device *dev, void *p)
767 767
768/* TODO: to be further implemented as soon as Prism2 fully supports 768/* TODO: to be further implemented as soon as Prism2 fully supports
769 * GroupAddresses and correct documentation is available */ 769 * GroupAddresses and correct documentation is available */
770void hostap_set_multicast_list_queue(void *data) 770void hostap_set_multicast_list_queue(struct work_struct *work)
771{ 771{
772 struct net_device *dev = (struct net_device *) data; 772 local_info_t *local =
773 container_of(work, local_info_t, set_multicast_list_queue);
774 struct net_device *dev = local->dev;
773 struct hostap_interface *iface; 775 struct hostap_interface *iface;
774 local_info_t *local;
775 776
776 iface = netdev_priv(dev); 777 iface = netdev_priv(dev);
777 local = iface->local;
778 if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, 778 if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
779 local->is_promisc)) { 779 local->is_promisc)) {
780 printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", 780 printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 79607b8b877c..1bcd352a813b 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -316,7 +316,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv,
316 struct ipw2100_fw *fw); 316 struct ipw2100_fw *fw);
317static int ipw2100_ucode_download(struct ipw2100_priv *priv, 317static int ipw2100_ucode_download(struct ipw2100_priv *priv,
318 struct ipw2100_fw *fw); 318 struct ipw2100_fw *fw);
319static void ipw2100_wx_event_work(struct ipw2100_priv *priv); 319static void ipw2100_wx_event_work(struct work_struct *work);
320static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); 320static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
321static struct iw_handler_def ipw2100_wx_handler_def; 321static struct iw_handler_def ipw2100_wx_handler_def;
322 322
@@ -679,7 +679,8 @@ static void schedule_reset(struct ipw2100_priv *priv)
679 queue_delayed_work(priv->workqueue, &priv->reset_work, 679 queue_delayed_work(priv->workqueue, &priv->reset_work,
680 priv->reset_backoff * HZ); 680 priv->reset_backoff * HZ);
681 else 681 else
682 queue_work(priv->workqueue, &priv->reset_work); 682 queue_delayed_work(priv->workqueue, &priv->reset_work,
683 0);
683 684
684 if (priv->reset_backoff < MAX_RESET_BACKOFF) 685 if (priv->reset_backoff < MAX_RESET_BACKOFF)
685 priv->reset_backoff++; 686 priv->reset_backoff++;
@@ -1873,8 +1874,10 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1873 netif_stop_queue(priv->net_dev); 1874 netif_stop_queue(priv->net_dev);
1874} 1875}
1875 1876
1876static void ipw2100_reset_adapter(struct ipw2100_priv *priv) 1877static void ipw2100_reset_adapter(struct work_struct *work)
1877{ 1878{
1879 struct ipw2100_priv *priv =
1880 container_of(work, struct ipw2100_priv, reset_work.work);
1878 unsigned long flags; 1881 unsigned long flags;
1879 union iwreq_data wrqu = { 1882 union iwreq_data wrqu = {
1880 .ap_addr = { 1883 .ap_addr = {
@@ -2071,9 +2074,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2071 return; 2074 return;
2072 2075
2073 if (priv->status & STATUS_SECURITY_UPDATED) 2076 if (priv->status & STATUS_SECURITY_UPDATED)
2074 queue_work(priv->workqueue, &priv->security_work); 2077 queue_delayed_work(priv->workqueue, &priv->security_work, 0);
2075 2078
2076 queue_work(priv->workqueue, &priv->wx_event_work); 2079 queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
2077} 2080}
2078 2081
2079static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) 2082static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -5524,8 +5527,11 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode)
5524 return err; 5527 return err;
5525} 5528}
5526 5529
5527static void ipw2100_security_work(struct ipw2100_priv *priv) 5530static void ipw2100_security_work(struct work_struct *work)
5528{ 5531{
5532 struct ipw2100_priv *priv =
5533 container_of(work, struct ipw2100_priv, security_work.work);
5534
5529 /* If we happen to have reconnected before we get a chance to 5535 /* If we happen to have reconnected before we get a chance to
5530 * process this, then update the security settings--which causes 5536 * process this, then update the security settings--which causes
5531 * a disassociation to occur */ 5537 * a disassociation to occur */
@@ -5748,7 +5754,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p)
5748 5754
5749 priv->reset_backoff = 0; 5755 priv->reset_backoff = 0;
5750 mutex_unlock(&priv->action_mutex); 5756 mutex_unlock(&priv->action_mutex);
5751 ipw2100_reset_adapter(priv); 5757 ipw2100_reset_adapter(&priv->reset_work.work);
5752 return 0; 5758 return 0;
5753 5759
5754 done: 5760 done:
@@ -5910,9 +5916,10 @@ static const struct ethtool_ops ipw2100_ethtool_ops = {
5910 .get_drvinfo = ipw_ethtool_get_drvinfo, 5916 .get_drvinfo = ipw_ethtool_get_drvinfo,
5911}; 5917};
5912 5918
5913static void ipw2100_hang_check(void *adapter) 5919static void ipw2100_hang_check(struct work_struct *work)
5914{ 5920{
5915 struct ipw2100_priv *priv = adapter; 5921 struct ipw2100_priv *priv =
5922 container_of(work, struct ipw2100_priv, hang_check.work);
5916 unsigned long flags; 5923 unsigned long flags;
5917 u32 rtc = 0xa5a5a5a5; 5924 u32 rtc = 0xa5a5a5a5;
5918 u32 len = sizeof(rtc); 5925 u32 len = sizeof(rtc);
@@ -5952,9 +5959,10 @@ static void ipw2100_hang_check(void *adapter)
5952 spin_unlock_irqrestore(&priv->low_lock, flags); 5959 spin_unlock_irqrestore(&priv->low_lock, flags);
5953} 5960}
5954 5961
5955static void ipw2100_rf_kill(void *adapter) 5962static void ipw2100_rf_kill(struct work_struct *work)
5956{ 5963{
5957 struct ipw2100_priv *priv = adapter; 5964 struct ipw2100_priv *priv =
5965 container_of(work, struct ipw2100_priv, rf_kill.work);
5958 unsigned long flags; 5966 unsigned long flags;
5959 5967
5960 spin_lock_irqsave(&priv->low_lock, flags); 5968 spin_lock_irqsave(&priv->low_lock, flags);
@@ -6103,14 +6111,11 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6103 6111
6104 priv->workqueue = create_workqueue(DRV_NAME); 6112 priv->workqueue = create_workqueue(DRV_NAME);
6105 6113
6106 INIT_WORK(&priv->reset_work, 6114 INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
6107 (void (*)(void *))ipw2100_reset_adapter, priv); 6115 INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
6108 INIT_WORK(&priv->security_work, 6116 INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
6109 (void (*)(void *))ipw2100_security_work, priv); 6117 INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
6110 INIT_WORK(&priv->wx_event_work, 6118 INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
6111 (void (*)(void *))ipw2100_wx_event_work, priv);
6112 INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
6113 INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
6114 6119
6115 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 6120 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6116 ipw2100_irq_tasklet, (unsigned long)priv); 6121 ipw2100_irq_tasklet, (unsigned long)priv);
@@ -8281,8 +8286,10 @@ static struct iw_handler_def ipw2100_wx_handler_def = {
8281 .get_wireless_stats = ipw2100_wx_wireless_stats, 8286 .get_wireless_stats = ipw2100_wx_wireless_stats,
8282}; 8287};
8283 8288
8284static void ipw2100_wx_event_work(struct ipw2100_priv *priv) 8289static void ipw2100_wx_event_work(struct work_struct *work)
8285{ 8290{
8291 struct ipw2100_priv *priv =
8292 container_of(work, struct ipw2100_priv, wx_event_work.work);
8286 union iwreq_data wrqu; 8293 union iwreq_data wrqu;
8287 int len = ETH_ALEN; 8294 int len = ETH_ALEN;
8288 8295
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index 55b7227198df..de7d384d38af 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -583,11 +583,11 @@ struct ipw2100_priv {
583 struct tasklet_struct irq_tasklet; 583 struct tasklet_struct irq_tasklet;
584 584
585 struct workqueue_struct *workqueue; 585 struct workqueue_struct *workqueue;
586 struct work_struct reset_work; 586 struct delayed_work reset_work;
587 struct work_struct security_work; 587 struct delayed_work security_work;
588 struct work_struct wx_event_work; 588 struct delayed_work wx_event_work;
589 struct work_struct hang_check; 589 struct delayed_work hang_check;
590 struct work_struct rf_kill; 590 struct delayed_work rf_kill;
591 591
592 u32 interrupts; 592 u32 interrupts;
593 int tx_interrupts; 593 int tx_interrupts;
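
The ipw2100 hunks also drop the (void (*)(void *)) casts that the old INIT_WORK calls needed, so the compiler can again check the handler prototype; and the one place that calls a handler synchronously (ipw2100_set_address) now passes the embedded work_struct. A sketch of both points with hypothetical names (struct my_priv, my_reset_adapter, my_set_address):

    #include <linux/workqueue.h>

    struct my_priv {
            struct workqueue_struct *workqueue;
            struct delayed_work reset_work;
    };

    static void my_reset_adapter(struct work_struct *work)
    {
            struct my_priv *priv =
                    container_of(work, struct my_priv, reset_work.work);

            /* ... full adapter reset using priv ... */
    }

    static void my_setup(struct my_priv *priv)
    {
            /* No more (void (*)(void *)) casts: the handler's prototype is
             * now checked against work_func_t by the compiler. */
            INIT_DELAYED_WORK(&priv->reset_work, my_reset_adapter);
    }

    static void my_set_address(struct my_priv *priv)
    {
            /* Calling the handler synchronously means handing it the embedded
             * work_struct, as ipw2100_set_address() does above. */
            my_reset_adapter(&priv->reset_work.work);
    }
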
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index c692d01a76ca..e82e56bb85e1 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -187,9 +187,9 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); 187static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188static void ipw_rx_queue_replenish(void *); 188static void ipw_rx_queue_replenish(void *);
189static int ipw_up(struct ipw_priv *); 189static int ipw_up(struct ipw_priv *);
190static void ipw_bg_up(void *); 190static void ipw_bg_up(struct work_struct *work);
191static void ipw_down(struct ipw_priv *); 191static void ipw_down(struct ipw_priv *);
192static void ipw_bg_down(void *); 192static void ipw_bg_down(struct work_struct *work);
193static int ipw_config(struct ipw_priv *); 193static int ipw_config(struct ipw_priv *);
194static int init_supported_rates(struct ipw_priv *priv, 194static int init_supported_rates(struct ipw_priv *priv,
195 struct ipw_supported_rates *prates); 195 struct ipw_supported_rates *prates);
@@ -862,11 +862,12 @@ static void ipw_led_link_on(struct ipw_priv *priv)
862 spin_unlock_irqrestore(&priv->lock, flags); 862 spin_unlock_irqrestore(&priv->lock, flags);
863} 863}
864 864
865static void ipw_bg_led_link_on(void *data) 865static void ipw_bg_led_link_on(struct work_struct *work)
866{ 866{
867 struct ipw_priv *priv = data; 867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
868 mutex_lock(&priv->mutex); 869 mutex_lock(&priv->mutex);
869 ipw_led_link_on(data); 870 ipw_led_link_on(priv);
870 mutex_unlock(&priv->mutex); 871 mutex_unlock(&priv->mutex);
871} 872}
872 873
@@ -906,11 +907,12 @@ static void ipw_led_link_off(struct ipw_priv *priv)
906 spin_unlock_irqrestore(&priv->lock, flags); 907 spin_unlock_irqrestore(&priv->lock, flags);
907} 908}
908 909
909static void ipw_bg_led_link_off(void *data) 910static void ipw_bg_led_link_off(struct work_struct *work)
910{ 911{
911 struct ipw_priv *priv = data; 912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
912 mutex_lock(&priv->mutex); 914 mutex_lock(&priv->mutex);
913 ipw_led_link_off(data); 915 ipw_led_link_off(priv);
914 mutex_unlock(&priv->mutex); 916 mutex_unlock(&priv->mutex);
915} 917}
916 918
@@ -985,11 +987,12 @@ static void ipw_led_activity_off(struct ipw_priv *priv)
985 spin_unlock_irqrestore(&priv->lock, flags); 987 spin_unlock_irqrestore(&priv->lock, flags);
986} 988}
987 989
988static void ipw_bg_led_activity_off(void *data) 990static void ipw_bg_led_activity_off(struct work_struct *work)
989{ 991{
990 struct ipw_priv *priv = data; 992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
991 mutex_lock(&priv->mutex); 994 mutex_lock(&priv->mutex);
992 ipw_led_activity_off(data); 995 ipw_led_activity_off(priv);
993 mutex_unlock(&priv->mutex); 996 mutex_unlock(&priv->mutex);
994} 997}
995 998
@@ -2228,11 +2231,12 @@ static void ipw_adapter_restart(void *adapter)
2228 } 2231 }
2229} 2232}
2230 2233
2231static void ipw_bg_adapter_restart(void *data) 2234static void ipw_bg_adapter_restart(struct work_struct *work)
2232{ 2235{
2233 struct ipw_priv *priv = data; 2236 struct ipw_priv *priv =
2237 container_of(work, struct ipw_priv, adapter_restart);
2234 mutex_lock(&priv->mutex); 2238 mutex_lock(&priv->mutex);
2235 ipw_adapter_restart(data); 2239 ipw_adapter_restart(priv);
2236 mutex_unlock(&priv->mutex); 2240 mutex_unlock(&priv->mutex);
2237} 2241}
2238 2242
@@ -2249,11 +2253,12 @@ static void ipw_scan_check(void *data)
2249 } 2253 }
2250} 2254}
2251 2255
2252static void ipw_bg_scan_check(void *data) 2256static void ipw_bg_scan_check(struct work_struct *work)
2253{ 2257{
2254 struct ipw_priv *priv = data; 2258 struct ipw_priv *priv =
2259 container_of(work, struct ipw_priv, scan_check.work);
2255 mutex_lock(&priv->mutex); 2260 mutex_lock(&priv->mutex);
2256 ipw_scan_check(data); 2261 ipw_scan_check(priv);
2257 mutex_unlock(&priv->mutex); 2262 mutex_unlock(&priv->mutex);
2258} 2263}
2259 2264
@@ -3831,17 +3836,19 @@ static int ipw_disassociate(void *data)
3831 return 1; 3836 return 1;
3832} 3837}
3833 3838
3834static void ipw_bg_disassociate(void *data) 3839static void ipw_bg_disassociate(struct work_struct *work)
3835{ 3840{
3836 struct ipw_priv *priv = data; 3841 struct ipw_priv *priv =
3842 container_of(work, struct ipw_priv, disassociate);
3837 mutex_lock(&priv->mutex); 3843 mutex_lock(&priv->mutex);
3838 ipw_disassociate(data); 3844 ipw_disassociate(priv);
3839 mutex_unlock(&priv->mutex); 3845 mutex_unlock(&priv->mutex);
3840} 3846}
3841 3847
3842static void ipw_system_config(void *data) 3848static void ipw_system_config(struct work_struct *work)
3843{ 3849{
3844 struct ipw_priv *priv = data; 3850 struct ipw_priv *priv =
3851 container_of(work, struct ipw_priv, system_config);
3845 3852
3846#ifdef CONFIG_IPW2200_PROMISCUOUS 3853#ifdef CONFIG_IPW2200_PROMISCUOUS
3847 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 3854 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
@@ -4208,11 +4215,12 @@ static void ipw_gather_stats(struct ipw_priv *priv)
4208 IPW_STATS_INTERVAL); 4215 IPW_STATS_INTERVAL);
4209} 4216}
4210 4217
4211static void ipw_bg_gather_stats(void *data) 4218static void ipw_bg_gather_stats(struct work_struct *work)
4212{ 4219{
4213 struct ipw_priv *priv = data; 4220 struct ipw_priv *priv =
4221 container_of(work, struct ipw_priv, gather_stats.work);
4214 mutex_lock(&priv->mutex); 4222 mutex_lock(&priv->mutex);
4215 ipw_gather_stats(data); 4223 ipw_gather_stats(priv);
4216 mutex_unlock(&priv->mutex); 4224 mutex_unlock(&priv->mutex);
4217} 4225}
4218 4226
@@ -4268,8 +4276,8 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4268 if (!(priv->status & STATUS_ROAMING)) { 4276 if (!(priv->status & STATUS_ROAMING)) {
4269 priv->status |= STATUS_ROAMING; 4277 priv->status |= STATUS_ROAMING;
4270 if (!(priv->status & STATUS_SCANNING)) 4278 if (!(priv->status & STATUS_SCANNING))
4271 queue_work(priv->workqueue, 4279 queue_delayed_work(priv->workqueue,
4272 &priv->request_scan); 4280 &priv->request_scan, 0);
4273 } 4281 }
4274 return; 4282 return;
4275 } 4283 }
@@ -4607,8 +4615,8 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4607#ifdef CONFIG_IPW2200_MONITOR 4615#ifdef CONFIG_IPW2200_MONITOR
4608 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4616 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4609 priv->status |= STATUS_SCAN_FORCED; 4617 priv->status |= STATUS_SCAN_FORCED;
4610 queue_work(priv->workqueue, 4618 queue_delayed_work(priv->workqueue,
4611 &priv->request_scan); 4619 &priv->request_scan, 0);
4612 break; 4620 break;
4613 } 4621 }
4614 priv->status &= ~STATUS_SCAN_FORCED; 4622 priv->status &= ~STATUS_SCAN_FORCED;
@@ -4631,8 +4639,8 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4631 /* Don't schedule if we aborted the scan */ 4639 /* Don't schedule if we aborted the scan */
4632 priv->status &= ~STATUS_ROAMING; 4640 priv->status &= ~STATUS_ROAMING;
4633 } else if (priv->status & STATUS_SCAN_PENDING) 4641 } else if (priv->status & STATUS_SCAN_PENDING)
4634 queue_work(priv->workqueue, 4642 queue_delayed_work(priv->workqueue,
4635 &priv->request_scan); 4643 &priv->request_scan, 0);
4636 else if (priv->config & CFG_BACKGROUND_SCAN 4644 else if (priv->config & CFG_BACKGROUND_SCAN
4637 && priv->status & STATUS_ASSOCIATED) 4645 && priv->status & STATUS_ASSOCIATED)
4638 queue_delayed_work(priv->workqueue, 4646 queue_delayed_work(priv->workqueue,
@@ -5055,11 +5063,12 @@ static void ipw_rx_queue_replenish(void *data)
5055 ipw_rx_queue_restock(priv); 5063 ipw_rx_queue_restock(priv);
5056} 5064}
5057 5065
5058static void ipw_bg_rx_queue_replenish(void *data) 5066static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5059{ 5067{
5060 struct ipw_priv *priv = data; 5068 struct ipw_priv *priv =
5069 container_of(work, struct ipw_priv, rx_replenish);
5061 mutex_lock(&priv->mutex); 5070 mutex_lock(&priv->mutex);
5062 ipw_rx_queue_replenish(data); 5071 ipw_rx_queue_replenish(priv);
5063 mutex_unlock(&priv->mutex); 5072 mutex_unlock(&priv->mutex);
5064} 5073}
5065 5074
@@ -5489,9 +5498,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5489 return 1; 5498 return 1;
5490} 5499}
5491 5500
5492static void ipw_merge_adhoc_network(void *data) 5501static void ipw_merge_adhoc_network(struct work_struct *work)
5493{ 5502{
5494 struct ipw_priv *priv = data; 5503 struct ipw_priv *priv =
5504 container_of(work, struct ipw_priv, merge_networks);
5495 struct ieee80211_network *network = NULL; 5505 struct ieee80211_network *network = NULL;
5496 struct ipw_network_match match = { 5506 struct ipw_network_match match = {
5497 .network = priv->assoc_network 5507 .network = priv->assoc_network
@@ -5948,11 +5958,12 @@ static void ipw_adhoc_check(void *data)
5948 priv->assoc_request.beacon_interval); 5958 priv->assoc_request.beacon_interval);
5949} 5959}
5950 5960
5951static void ipw_bg_adhoc_check(void *data) 5961static void ipw_bg_adhoc_check(struct work_struct *work)
5952{ 5962{
5953 struct ipw_priv *priv = data; 5963 struct ipw_priv *priv =
5964 container_of(work, struct ipw_priv, adhoc_check.work);
5954 mutex_lock(&priv->mutex); 5965 mutex_lock(&priv->mutex);
5955 ipw_adhoc_check(data); 5966 ipw_adhoc_check(priv);
5956 mutex_unlock(&priv->mutex); 5967 mutex_unlock(&priv->mutex);
5957} 5968}
5958 5969
@@ -6299,19 +6310,26 @@ done:
6299 return err; 6310 return err;
6300} 6311}
6301 6312
6302static int ipw_request_passive_scan(struct ipw_priv *priv) { 6313static void ipw_request_passive_scan(struct work_struct *work)
6303 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); 6314{
6315 struct ipw_priv *priv =
6316 container_of(work, struct ipw_priv, request_passive_scan);
6317 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6304} 6318}
6305 6319
6306static int ipw_request_scan(struct ipw_priv *priv) { 6320static void ipw_request_scan(struct work_struct *work)
6307 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); 6321{
6322 struct ipw_priv *priv =
6323 container_of(work, struct ipw_priv, request_scan.work);
6324 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6308} 6325}
6309 6326
6310static void ipw_bg_abort_scan(void *data) 6327static void ipw_bg_abort_scan(struct work_struct *work)
6311{ 6328{
6312 struct ipw_priv *priv = data; 6329 struct ipw_priv *priv =
6330 container_of(work, struct ipw_priv, abort_scan);
6313 mutex_lock(&priv->mutex); 6331 mutex_lock(&priv->mutex);
6314 ipw_abort_scan(data); 6332 ipw_abort_scan(priv);
6315 mutex_unlock(&priv->mutex); 6333 mutex_unlock(&priv->mutex);
6316} 6334}
6317 6335
@@ -7084,9 +7102,10 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7084/* 7102/*
7085* background support to run QoS activate functionality 7103* background support to run QoS activate functionality
7086*/ 7104*/
7087static void ipw_bg_qos_activate(void *data) 7105static void ipw_bg_qos_activate(struct work_struct *work)
7088{ 7106{
7089 struct ipw_priv *priv = data; 7107 struct ipw_priv *priv =
7108 container_of(work, struct ipw_priv, qos_activate);
7090 7109
7091 if (priv == NULL) 7110 if (priv == NULL)
7092 return; 7111 return;
@@ -7394,11 +7413,12 @@ static void ipw_roam(void *data)
7394 priv->status &= ~STATUS_ROAMING; 7413 priv->status &= ~STATUS_ROAMING;
7395} 7414}
7396 7415
7397static void ipw_bg_roam(void *data) 7416static void ipw_bg_roam(struct work_struct *work)
7398{ 7417{
7399 struct ipw_priv *priv = data; 7418 struct ipw_priv *priv =
7419 container_of(work, struct ipw_priv, roam);
7400 mutex_lock(&priv->mutex); 7420 mutex_lock(&priv->mutex);
7401 ipw_roam(data); 7421 ipw_roam(priv);
7402 mutex_unlock(&priv->mutex); 7422 mutex_unlock(&priv->mutex);
7403} 7423}
7404 7424
@@ -7479,8 +7499,8 @@ static int ipw_associate(void *data)
7479 &priv->request_scan, 7499 &priv->request_scan,
7480 SCAN_INTERVAL); 7500 SCAN_INTERVAL);
7481 else 7501 else
7482 queue_work(priv->workqueue, 7502 queue_delayed_work(priv->workqueue,
7483 &priv->request_scan); 7503 &priv->request_scan, 0);
7484 } 7504 }
7485 7505
7486 return 0; 7506 return 0;
@@ -7491,11 +7511,12 @@ static int ipw_associate(void *data)
7491 return 1; 7511 return 1;
7492} 7512}
7493 7513
7494static void ipw_bg_associate(void *data) 7514static void ipw_bg_associate(struct work_struct *work)
7495{ 7515{
7496 struct ipw_priv *priv = data; 7516 struct ipw_priv *priv =
7517 container_of(work, struct ipw_priv, associate);
7497 mutex_lock(&priv->mutex); 7518 mutex_lock(&priv->mutex);
7498 ipw_associate(data); 7519 ipw_associate(priv);
7499 mutex_unlock(&priv->mutex); 7520 mutex_unlock(&priv->mutex);
7500} 7521}
7501 7522
@@ -9410,7 +9431,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
9410 9431
9411 IPW_DEBUG_WX("Start scan\n"); 9432 IPW_DEBUG_WX("Start scan\n");
9412 9433
9413 queue_work(priv->workqueue, &priv->request_scan); 9434 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
9414 9435
9415 return 0; 9436 return 0;
9416} 9437}
@@ -10547,11 +10568,12 @@ static void ipw_rf_kill(void *adapter)
10547 spin_unlock_irqrestore(&priv->lock, flags); 10568 spin_unlock_irqrestore(&priv->lock, flags);
10548} 10569}
10549 10570
10550static void ipw_bg_rf_kill(void *data) 10571static void ipw_bg_rf_kill(struct work_struct *work)
10551{ 10572{
10552 struct ipw_priv *priv = data; 10573 struct ipw_priv *priv =
10574 container_of(work, struct ipw_priv, rf_kill.work);
10553 mutex_lock(&priv->mutex); 10575 mutex_lock(&priv->mutex);
10554 ipw_rf_kill(data); 10576 ipw_rf_kill(priv);
10555 mutex_unlock(&priv->mutex); 10577 mutex_unlock(&priv->mutex);
10556} 10578}
10557 10579
@@ -10582,11 +10604,12 @@ static void ipw_link_up(struct ipw_priv *priv)
10582 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); 10604 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10583} 10605}
10584 10606
10585static void ipw_bg_link_up(void *data) 10607static void ipw_bg_link_up(struct work_struct *work)
10586{ 10608{
10587 struct ipw_priv *priv = data; 10609 struct ipw_priv *priv =
10610 container_of(work, struct ipw_priv, link_up);
10588 mutex_lock(&priv->mutex); 10611 mutex_lock(&priv->mutex);
10589 ipw_link_up(data); 10612 ipw_link_up(priv);
10590 mutex_unlock(&priv->mutex); 10613 mutex_unlock(&priv->mutex);
10591} 10614}
10592 10615
@@ -10606,15 +10629,16 @@ static void ipw_link_down(struct ipw_priv *priv)
10606 10629
10607 if (!(priv->status & STATUS_EXIT_PENDING)) { 10630 if (!(priv->status & STATUS_EXIT_PENDING)) {
10608 /* Queue up another scan... */ 10631 /* Queue up another scan... */
10609 queue_work(priv->workqueue, &priv->request_scan); 10632 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10610 } 10633 }
10611} 10634}
10612 10635
10613static void ipw_bg_link_down(void *data) 10636static void ipw_bg_link_down(struct work_struct *work)
10614{ 10637{
10615 struct ipw_priv *priv = data; 10638 struct ipw_priv *priv =
10639 container_of(work, struct ipw_priv, link_down);
10616 mutex_lock(&priv->mutex); 10640 mutex_lock(&priv->mutex);
10617 ipw_link_down(data); 10641 ipw_link_down(priv);
10618 mutex_unlock(&priv->mutex); 10642 mutex_unlock(&priv->mutex);
10619} 10643}
10620 10644
@@ -10626,38 +10650,30 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
10626 init_waitqueue_head(&priv->wait_command_queue); 10650 init_waitqueue_head(&priv->wait_command_queue);
10627 init_waitqueue_head(&priv->wait_state); 10651 init_waitqueue_head(&priv->wait_state);
10628 10652
10629 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv); 10653 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10630 INIT_WORK(&priv->associate, ipw_bg_associate, priv); 10654 INIT_WORK(&priv->associate, ipw_bg_associate);
10631 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv); 10655 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10632 INIT_WORK(&priv->system_config, ipw_system_config, priv); 10656 INIT_WORK(&priv->system_config, ipw_system_config);
10633 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv); 10657 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10634 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv); 10658 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10635 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv); 10659 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10636 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv); 10660 INIT_WORK(&priv->up, ipw_bg_up);
10637 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); 10661 INIT_WORK(&priv->down, ipw_bg_down);
10638 INIT_WORK(&priv->request_scan, 10662 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10639 (void (*)(void *))ipw_request_scan, priv); 10663 INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10640 INIT_WORK(&priv->request_passive_scan, 10664 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10641 (void (*)(void *))ipw_request_passive_scan, priv); 10665 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10642 INIT_WORK(&priv->gather_stats, 10666 INIT_WORK(&priv->roam, ipw_bg_roam);
10643 (void (*)(void *))ipw_bg_gather_stats, priv); 10667 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10644 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); 10668 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10645 INIT_WORK(&priv->roam, ipw_bg_roam, priv); 10669 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10646 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv); 10670 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10647 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv); 10671 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10648 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv); 10672 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10649 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on, 10673 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10650 priv);
10651 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10652 priv);
10653 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10654 priv);
10655 INIT_WORK(&priv->merge_networks,
10656 (void (*)(void *))ipw_merge_adhoc_network, priv);
10657 10674
10658#ifdef CONFIG_IPW2200_QOS 10675#ifdef CONFIG_IPW2200_QOS
10659 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate, 10676 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10660 priv);
10661#endif /* CONFIG_IPW2200_QOS */ 10677#endif /* CONFIG_IPW2200_QOS */
10662 10678
10663	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 10679	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@ -11190,7 +11206,8 @@ static int ipw_up(struct ipw_priv *priv)
11190 11206
11191 /* If configure to try and auto-associate, kick 11207 /* If configure to try and auto-associate, kick
11192 * off a scan. */ 11208 * off a scan. */
11193 queue_work(priv->workqueue, &priv->request_scan); 11209 queue_delayed_work(priv->workqueue,
11210 &priv->request_scan, 0);
11194 11211
11195 return 0; 11212 return 0;
11196 } 11213 }
@@ -11211,11 +11228,12 @@ static int ipw_up(struct ipw_priv *priv)
11211 return -EIO; 11228 return -EIO;
11212} 11229}
11213 11230
11214static void ipw_bg_up(void *data) 11231static void ipw_bg_up(struct work_struct *work)
11215{ 11232{
11216 struct ipw_priv *priv = data; 11233 struct ipw_priv *priv =
11234 container_of(work, struct ipw_priv, up);
11217 mutex_lock(&priv->mutex); 11235 mutex_lock(&priv->mutex);
11218 ipw_up(data); 11236 ipw_up(priv);
11219 mutex_unlock(&priv->mutex); 11237 mutex_unlock(&priv->mutex);
11220} 11238}
11221 11239
@@ -11282,11 +11300,12 @@ static void ipw_down(struct ipw_priv *priv)
11282 ipw_led_radio_off(priv); 11300 ipw_led_radio_off(priv);
11283} 11301}
11284 11302
11285static void ipw_bg_down(void *data) 11303static void ipw_bg_down(struct work_struct *work)
11286{ 11304{
11287 struct ipw_priv *priv = data; 11305 struct ipw_priv *priv =
11306 container_of(work, struct ipw_priv, down);
11288 mutex_lock(&priv->mutex); 11307 mutex_lock(&priv->mutex);
11289 ipw_down(data); 11308 ipw_down(priv);
11290 mutex_unlock(&priv->mutex); 11309 mutex_unlock(&priv->mutex);
11291} 11310}
11292 11311
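
The ipw2200.c hunks above all follow one shape: work handlers that used to take a void *data cookie now take a struct work_struct * and recover the driver's private structure with container_of(), and any item that at least one call site queues with a delay becomes a struct delayed_work, which the immediate call sites now queue with a delay of 0. A minimal sketch of the resulting pattern, using hypothetical names (struct example_priv, example_bg_scan) rather than the driver's own:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_priv {
	struct mutex mutex;
	struct workqueue_struct *wq;
	struct delayed_work request_scan;	/* was a plain work_struct */
};

/* New-style handler: the only argument is the work_struct pointer. */
static void example_bg_scan(struct work_struct *work)
{
	/* For a delayed_work member, container_of() goes through .work. */
	struct example_priv *priv =
		container_of(work, struct example_priv, request_scan.work);

	mutex_lock(&priv->mutex);
	/* ... run the scan using priv ... */
	mutex_unlock(&priv->mutex);
}

static void example_kick_scan(struct example_priv *priv)
{
	/* A zero delay replaces the old queue_work() call. */
	queue_delayed_work(priv->wq, &priv->request_scan, 0);
}
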
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index dad5eedefbf1..626a240a87d8 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1290,21 +1290,21 @@ struct ipw_priv {
1290 1290
1291 struct workqueue_struct *workqueue; 1291 struct workqueue_struct *workqueue;
1292 1292
1293 struct work_struct adhoc_check; 1293 struct delayed_work adhoc_check;
1294 struct work_struct associate; 1294 struct work_struct associate;
1295 struct work_struct disassociate; 1295 struct work_struct disassociate;
1296 struct work_struct system_config; 1296 struct work_struct system_config;
1297 struct work_struct rx_replenish; 1297 struct work_struct rx_replenish;
1298 struct work_struct request_scan; 1298 struct delayed_work request_scan;
1299 struct work_struct request_passive_scan; 1299 struct work_struct request_passive_scan;
1300 struct work_struct adapter_restart; 1300 struct work_struct adapter_restart;
1301 struct work_struct rf_kill; 1301 struct delayed_work rf_kill;
1302 struct work_struct up; 1302 struct work_struct up;
1303 struct work_struct down; 1303 struct work_struct down;
1304 struct work_struct gather_stats; 1304 struct delayed_work gather_stats;
1305 struct work_struct abort_scan; 1305 struct work_struct abort_scan;
1306 struct work_struct roam; 1306 struct work_struct roam;
1307 struct work_struct scan_check; 1307 struct delayed_work scan_check;
1308 struct work_struct link_up; 1308 struct work_struct link_up;
1309 struct work_struct link_down; 1309 struct work_struct link_down;
1310 1310
@@ -1319,9 +1319,9 @@ struct ipw_priv {
1319 u32 led_ofdm_on; 1319 u32 led_ofdm_on;
1320 u32 led_ofdm_off; 1320 u32 led_ofdm_off;
1321 1321
1322 struct work_struct led_link_on; 1322 struct delayed_work led_link_on;
1323 struct work_struct led_link_off; 1323 struct delayed_work led_link_off;
1324 struct work_struct led_act_off; 1324 struct delayed_work led_act_off;
1325 struct work_struct merge_networks; 1325 struct work_struct merge_networks;
1326 1326
1327 struct ipw_cmd_log *cmdlog; 1327 struct ipw_cmd_log *cmdlog;
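
The header hunk above only changes member types, but it encodes the pairing rule the rest of the patch relies on: a delayed_work member is set up with INIT_DELAYED_WORK() and a plain work_struct with the two-argument INIT_WORK(), each taking just the item and the handler. A compressed, hypothetical setup mirroring ipw_setup_deferred_work():

#include <linux/workqueue.h>

struct example_priv {
	struct delayed_work request_scan;	/* sometimes queued with a delay */
	struct work_struct associate;		/* always queued immediately */
};

static void example_bg_scan(struct work_struct *work)
{
	/* handler body elided in this sketch */
}

static void example_bg_associate(struct work_struct *work)
{
	/* handler body elided in this sketch */
}

static void example_setup_deferred_work(struct example_priv *priv)
{
	INIT_DELAYED_WORK(&priv->request_scan, example_bg_scan);
	INIT_WORK(&priv->associate, example_bg_associate);
}
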
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 336cabac13b3..936c888e03e1 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -980,9 +980,11 @@ static void print_linkstatus(struct net_device *dev, u16 status)
980} 980}
981 981
982/* Search scan results for requested BSSID, join it if found */ 982/* Search scan results for requested BSSID, join it if found */
983static void orinoco_join_ap(struct net_device *dev) 983static void orinoco_join_ap(struct work_struct *work)
984{ 984{
985 struct orinoco_private *priv = netdev_priv(dev); 985 struct orinoco_private *priv =
986 container_of(work, struct orinoco_private, join_work);
987 struct net_device *dev = priv->ndev;
986 struct hermes *hw = &priv->hw; 988 struct hermes *hw = &priv->hw;
987 int err; 989 int err;
988 unsigned long flags; 990 unsigned long flags;
@@ -1055,9 +1057,11 @@ static void orinoco_join_ap(struct net_device *dev)
1055} 1057}
1056 1058
1057/* Send new BSSID to userspace */ 1059/* Send new BSSID to userspace */
1058static void orinoco_send_wevents(struct net_device *dev) 1060static void orinoco_send_wevents(struct work_struct *work)
1059{ 1061{
1060 struct orinoco_private *priv = netdev_priv(dev); 1062 struct orinoco_private *priv =
1063 container_of(work, struct orinoco_private, wevent_work);
1064 struct net_device *dev = priv->ndev;
1061 struct hermes *hw = &priv->hw; 1065 struct hermes *hw = &priv->hw;
1062 union iwreq_data wrqu; 1066 union iwreq_data wrqu;
1063 int err; 1067 int err;
@@ -1864,9 +1868,11 @@ __orinoco_set_multicast_list(struct net_device *dev)
1864 1868
1865/* This must be called from user context, without locks held - use 1869/* This must be called from user context, without locks held - use
1866 * schedule_work() */ 1870 * schedule_work() */
1867static void orinoco_reset(struct net_device *dev) 1871static void orinoco_reset(struct work_struct *work)
1868{ 1872{
1869 struct orinoco_private *priv = netdev_priv(dev); 1873 struct orinoco_private *priv =
1874 container_of(work, struct orinoco_private, reset_work);
1875 struct net_device *dev = priv->ndev;
1870 struct hermes *hw = &priv->hw; 1876 struct hermes *hw = &priv->hw;
1871 int err; 1877 int err;
1872 unsigned long flags; 1878 unsigned long flags;
@@ -2434,9 +2440,9 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2434 priv->hw_unavailable = 1; /* orinoco_init() must clear this 2440 priv->hw_unavailable = 1; /* orinoco_init() must clear this
2435 * before anything else touches the 2441 * before anything else touches the
2436 * hardware */ 2442 * hardware */
2437 INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev); 2443 INIT_WORK(&priv->reset_work, orinoco_reset);
2438 INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev); 2444 INIT_WORK(&priv->join_work, orinoco_join_ap);
2439 INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev); 2445 INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
2440 2446
2441 netif_carrier_off(dev); 2447 netif_carrier_off(dev);
2442 priv->last_linkstatus = 0xffff; 2448 priv->last_linkstatus = 0xffff;
@@ -3608,7 +3614,7 @@ static int orinoco_ioctl_reset(struct net_device *dev,
3608 printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); 3614 printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
3609 3615
3610 /* Firmware reset */ 3616 /* Firmware reset */
3611 orinoco_reset(dev); 3617 orinoco_reset(&priv->reset_work);
3612 } else { 3618 } else {
3613 printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); 3619 printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
3614 3620
@@ -4154,7 +4160,7 @@ static int orinoco_ioctl_commit(struct net_device *dev,
4154 return 0; 4160 return 0;
4155 4161
4156 if (priv->broken_disableport) { 4162 if (priv->broken_disableport) {
4157 orinoco_reset(dev); 4163 orinoco_reset(&priv->reset_work);
4158 return 0; 4164 return 0;
4159 } 4165 }
4160 4166
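
The orinoco handlers used to be handed the net_device directly; after the conversion they only get the work_struct, so they first recover the private structure and then reach the device through the priv->ndev back pointer, and synchronous callers such as the ioctl paths above now pass the address of the work item rather than the device. A sketch under those assumptions, with invented names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_wifi_priv {
	struct net_device *ndev;	/* back pointer filled in at alloc time */
	struct work_struct reset_work;
};

static void example_reset(struct work_struct *work)
{
	struct example_wifi_priv *priv =
		container_of(work, struct example_wifi_priv, reset_work);
	struct net_device *dev = priv->ndev;

	netif_stop_queue(dev);
	/* ... reinitialize the hardware ... */
	netif_wake_queue(dev);
}

/* A direct (synchronous) call site now passes the work item itself. */
static void example_force_reset(struct example_wifi_priv *priv)
{
	example_reset(&priv->reset_work);
}
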
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 4a20e45de3ca..a87eb51886c8 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -157,8 +157,9 @@ prism54_mib_init(islpci_private *priv)
157 * schedule_work(), thus we can as well use sleeping semaphore 157 * schedule_work(), thus we can as well use sleeping semaphore
158 * locking */ 158 * locking */
159void 159void
160prism54_update_stats(islpci_private *priv) 160prism54_update_stats(struct work_struct *work)
161{ 161{
162 islpci_private *priv = container_of(work, islpci_private, stats_work);
162 char *data; 163 char *data;
163 int j; 164 int j;
164 struct obj_bss bss, *bss2; 165 struct obj_bss bss, *bss2;
@@ -2493,9 +2494,10 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
2493 * interrupt context, no locks held. 2494 * interrupt context, no locks held.
2494 */ 2495 */
2495void 2496void
2496prism54_process_trap(void *data) 2497prism54_process_trap(struct work_struct *work)
2497{ 2498{
2498 struct islpci_mgmtframe *frame = data; 2499 struct islpci_mgmtframe *frame =
2500 container_of(work, struct islpci_mgmtframe, ws);
2499 struct net_device *ndev = frame->ndev; 2501 struct net_device *ndev = frame->ndev;
2500 enum oid_num_t n = mgt_oidtonum(frame->header->oid); 2502 enum oid_num_t n = mgt_oidtonum(frame->header->oid);
2501 2503
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index e8183d30c52e..bcfbfb9281d2 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -31,12 +31,12 @@
31void prism54_mib_init(islpci_private *); 31void prism54_mib_init(islpci_private *);
32 32
33struct iw_statistics *prism54_get_wireless_stats(struct net_device *); 33struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
34void prism54_update_stats(islpci_private *); 34void prism54_update_stats(struct work_struct *);
35 35
36void prism54_acl_init(struct islpci_acl *); 36void prism54_acl_init(struct islpci_acl *);
37void prism54_acl_clean(struct islpci_acl *); 37void prism54_acl_clean(struct islpci_acl *);
38 38
39void prism54_process_trap(void *); 39void prism54_process_trap(struct work_struct *);
40 40
41void prism54_wpa_bss_ie_init(islpci_private *priv); 41void prism54_wpa_bss_ie_init(islpci_private *priv);
42void prism54_wpa_bss_ie_clean(islpci_private *priv); 42void prism54_wpa_bss_ie_clean(islpci_private *priv);
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 1e0603ca436c..f057fd9fcd79 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -860,11 +860,10 @@ islpci_setup(struct pci_dev *pdev)
860 priv->state_off = 1; 860 priv->state_off = 1;
861 861
862 /* initialize workqueue's */ 862 /* initialize workqueue's */
863 INIT_WORK(&priv->stats_work, 863 INIT_WORK(&priv->stats_work, prism54_update_stats);
864 (void (*)(void *)) prism54_update_stats, priv);
865 priv->stats_timestamp = 0; 864 priv->stats_timestamp = 0;
866 865
867 INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv); 866 INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
868 priv->reset_task_pending = 0; 867 priv->reset_task_pending = 0;
869 868
870 /* allocate various memory areas */ 869 /* allocate various memory areas */
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 676d83813dc8..b1122912ee2d 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -480,9 +480,9 @@ islpci_eth_receive(islpci_private *priv)
480} 480}
481 481
482void 482void
483islpci_do_reset_and_wake(void *data) 483islpci_do_reset_and_wake(struct work_struct *work)
484{ 484{
485 islpci_private *priv = data; 485 islpci_private *priv = container_of(work, islpci_private, reset_task);
486 486
487 islpci_reset(priv, 1); 487 islpci_reset(priv, 1);
488 priv->reset_task_pending = 0; 488 priv->reset_task_pending = 0;
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 26789454067c..5bf820defbd0 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -67,6 +67,6 @@ void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *);
67int islpci_eth_transmit(struct sk_buff *, struct net_device *); 67int islpci_eth_transmit(struct sk_buff *, struct net_device *);
68int islpci_eth_receive(islpci_private *); 68int islpci_eth_receive(islpci_private *);
69void islpci_eth_tx_timeout(struct net_device *); 69void islpci_eth_tx_timeout(struct net_device *);
70void islpci_do_reset_and_wake(void *data); 70void islpci_do_reset_and_wake(struct work_struct *);
71 71
72#endif /* _ISL_GEN_H */ 72#endif /* _ISL_GEN_H */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 036a875054c9..2246f7930b4e 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -386,7 +386,7 @@ islpci_mgt_receive(struct net_device *ndev)
386 386
387 /* Create work to handle trap out of interrupt 387 /* Create work to handle trap out of interrupt
388 * context. */ 388 * context. */
389 INIT_WORK(&frame->ws, prism54_process_trap, frame); 389 INIT_WORK(&frame->ws, prism54_process_trap);
390 schedule_work(&frame->ws); 390 schedule_work(&frame->ws);
391 391
392 } else { 392 } else {
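
The prism54 trap path is the per-object variant: each management frame embeds its own work_struct, the receive path initializes and schedules it, and the handler gets the frame back with container_of() instead of from the old data argument. A self-contained sketch of that embed-and-recover pattern (the names and the freeing policy here are hypothetical, not prism54's):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_frame {
	struct work_struct ws;		/* embedded per-frame work item */
	int trap_id;
};

static void example_process_trap(struct work_struct *work)
{
	struct example_frame *frame =
		container_of(work, struct example_frame, ws);

	/* ... handle frame->trap_id outside interrupt context ... */
	kfree(frame);
}

/* Called from the receive path, possibly in interrupt context. */
static void example_queue_trap(int trap_id)
{
	struct example_frame *frame = kmalloc(sizeof(*frame), GFP_ATOMIC);

	if (!frame)
		return;
	frame->trap_id = trap_id;
	INIT_WORK(&frame->ws, example_process_trap);
	schedule_work(&frame->ws);
}
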
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 2696f95b9278..44f3cfd4cc1d 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1182,9 +1182,10 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
1182 1182
1183#define LINK_LED_WORK_DELAY HZ 1183#define LINK_LED_WORK_DELAY HZ
1184 1184
1185static void link_led_handler(void *p) 1185static void link_led_handler(struct work_struct *work)
1186{ 1186{
1187 struct zd_mac *mac = p; 1187 struct zd_mac *mac =
1188 container_of(work, struct zd_mac, housekeeping.link_led_work.work);
1188 struct zd_chip *chip = &mac->chip; 1189 struct zd_chip *chip = &mac->chip;
1189 struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev); 1190 struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
1190 int is_associated; 1191 int is_associated;
@@ -1205,7 +1206,7 @@ static void link_led_handler(void *p)
1205 1206
1206static void housekeeping_init(struct zd_mac *mac) 1207static void housekeeping_init(struct zd_mac *mac)
1207{ 1208{
1208 INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac); 1209 INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
1209} 1210}
1210 1211
1211static void housekeeping_enable(struct zd_mac *mac) 1212static void housekeeping_enable(struct zd_mac *mac)
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 5dcfb251f02e..08d6b8c08e75 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -119,7 +119,7 @@ struct rx_status {
119#define ZD_RX_ERROR 0x80 119#define ZD_RX_ERROR 0x80
120 120
121struct housekeeping { 121struct housekeeping {
122 struct work_struct link_led_work; 122 struct delayed_work link_led_work;
123}; 123};
124 124
125#define ZD_MAC_STATS_BUFFER_SIZE 16 125#define ZD_MAC_STATS_BUFFER_SIZE 16
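
zd1211rw shows that the container_of() recovery works through nested members too: the delayed_work lives inside the embedded housekeeping struct, so the member path in the hunk above is housekeeping.link_led_work.work. A compressed sketch with hypothetical names:

#include <linux/workqueue.h>

struct example_housekeeping {
	struct delayed_work led_work;
};

struct example_mac {
	struct example_housekeeping housekeeping;
};

static void example_led_handler(struct work_struct *work)
{
	/* The third argument is the full member path down to the work_struct. */
	struct example_mac *mac =
		container_of(work, struct example_mac,
			     housekeeping.led_work.work);

	/* ... update the link LED from mac state ... */
	(void)mac;
}

static void example_housekeeping_init(struct example_mac *mac)
{
	INIT_DELAYED_WORK(&mac->housekeeping.led_work, example_led_handler);
}
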
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index fc4bc9b94c74..a83c3db7d18f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -29,7 +29,7 @@
29 29
30struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned; 30struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
31 31
32static void wq_sync_buffer(void *); 32static void wq_sync_buffer(struct work_struct *work);
33 33
34#define DEFAULT_TIMER_EXPIRE (HZ / 10) 34#define DEFAULT_TIMER_EXPIRE (HZ / 10)
35static int work_enabled; 35static int work_enabled;
@@ -65,7 +65,7 @@ int alloc_cpu_buffers(void)
65 b->sample_received = 0; 65 b->sample_received = 0;
66 b->sample_lost_overflow = 0; 66 b->sample_lost_overflow = 0;
67 b->cpu = i; 67 b->cpu = i;
68 INIT_WORK(&b->work, wq_sync_buffer, b); 68 INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
69 } 69 }
70 return 0; 70 return 0;
71 71
@@ -282,9 +282,10 @@ void oprofile_add_trace(unsigned long pc)
282 * By using schedule_delayed_work_on and then schedule_delayed_work 282 * By using schedule_delayed_work_on and then schedule_delayed_work
283 * we guarantee this will stay on the correct cpu 283 * we guarantee this will stay on the correct cpu
284 */ 284 */
285static void wq_sync_buffer(void * data) 285static void wq_sync_buffer(struct work_struct *work)
286{ 286{
287 struct oprofile_cpu_buffer * b = data; 287 struct oprofile_cpu_buffer * b =
288 container_of(work, struct oprofile_cpu_buffer, work.work);
288 if (b->cpu != smp_processor_id()) { 289 if (b->cpu != smp_processor_id()) {
289 printk("WQ on CPU%d, prefer CPU%d\n", 290 printk("WQ on CPU%d, prefer CPU%d\n",
290 smp_processor_id(), b->cpu); 291 smp_processor_id(), b->cpu);
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 09abb80e0570..49900d9e3235 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -43,7 +43,7 @@ struct oprofile_cpu_buffer {
43 unsigned long sample_lost_overflow; 43 unsigned long sample_lost_overflow;
44 unsigned long backtrace_aborted; 44 unsigned long backtrace_aborted;
45 int cpu; 45 int cpu;
46 struct work_struct work; 46 struct delayed_work work;
47} ____cacheline_aligned; 47} ____cacheline_aligned;
48 48
49extern struct oprofile_cpu_buffer cpu_buffer[]; 49extern struct oprofile_cpu_buffer cpu_buffer[];
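
The oprofile buffer is the self-rearming case: wq_sync_buffer is now a delayed_work handler, and (per the comment in the hunk above) the driver relies on the initial schedule_delayed_work_on() plus rescheduling from the handler to keep the work on the right CPU. A minimal sketch of a per-CPU, self-rearming worker under those assumptions, with invented names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define EXAMPLE_SYNC_INTERVAL	(HZ / 10)

struct example_cpu_buffer {
	int cpu;
	struct delayed_work work;
};

static void example_sync_buffer(struct work_struct *work)
{
	struct example_cpu_buffer *b =
		container_of(work, struct example_cpu_buffer, work.work);

	/* ... flush this CPU's samples ... */

	/* Re-arm from the handler, as the original comment describes. */
	schedule_delayed_work(&b->work, EXAMPLE_SYNC_INTERVAL);
}

static void example_start(struct example_cpu_buffer *b, int cpu)
{
	b->cpu = cpu;
	INIT_DELAYED_WORK(&b->work, example_sync_buffer);
	schedule_delayed_work_on(cpu, &b->work, EXAMPLE_SYNC_INTERVAL);
}
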
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index ea2087c34149..50757695844f 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -70,7 +70,7 @@ struct slot {
70 struct hotplug_slot *hotplug_slot; 70 struct hotplug_slot *hotplug_slot;
71 struct list_head slot_list; 71 struct list_head slot_list;
72 char name[SLOT_NAME_SIZE]; 72 char name[SLOT_NAME_SIZE];
73 struct work_struct work; /* work for button event */ 73 struct delayed_work work; /* work for button event */
74 struct mutex lock; 74 struct mutex lock;
75}; 75};
76 76
@@ -187,7 +187,7 @@ extern int shpchp_configure_device(struct slot *p_slot);
187extern int shpchp_unconfigure_device(struct slot *p_slot); 187extern int shpchp_unconfigure_device(struct slot *p_slot);
188extern void shpchp_remove_ctrl_files(struct controller *ctrl); 188extern void shpchp_remove_ctrl_files(struct controller *ctrl);
189extern void cleanup_slots(struct controller *ctrl); 189extern void cleanup_slots(struct controller *ctrl);
190extern void queue_pushbutton_work(void *data); 190extern void queue_pushbutton_work(struct work_struct *work);
191 191
192 192
193#ifdef CONFIG_ACPI 193#ifdef CONFIG_ACPI
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 235c18a22393..4eac85b3d90e 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -159,7 +159,7 @@ static int init_slots(struct controller *ctrl)
159 goto error_info; 159 goto error_info;
160 160
161 slot->number = sun; 161 slot->number = sun;
162 INIT_WORK(&slot->work, queue_pushbutton_work, slot); 162 INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work);
163 163
164 /* register this slot with the hotplug pci core */ 164 /* register this slot with the hotplug pci core */
165 hotplug_slot->private = slot; 165 hotplug_slot->private = slot;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index c39901dbff20..158ac7836096 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -36,7 +36,7 @@
36#include "../pci.h" 36#include "../pci.h"
37#include "shpchp.h" 37#include "shpchp.h"
38 38
39static void interrupt_event_handler(void *data); 39static void interrupt_event_handler(struct work_struct *work);
40static int shpchp_enable_slot(struct slot *p_slot); 40static int shpchp_enable_slot(struct slot *p_slot);
41static int shpchp_disable_slot(struct slot *p_slot); 41static int shpchp_disable_slot(struct slot *p_slot);
42 42
@@ -50,7 +50,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
50 50
51 info->event_type = event_type; 51 info->event_type = event_type;
52 info->p_slot = p_slot; 52 info->p_slot = p_slot;
53 INIT_WORK(&info->work, interrupt_event_handler, info); 53 INIT_WORK(&info->work, interrupt_event_handler);
54 54
55 schedule_work(&info->work); 55 schedule_work(&info->work);
56 56
@@ -408,9 +408,10 @@ struct pushbutton_work_info {
408 * Handles all pending events and exits. 408 * Handles all pending events and exits.
409 * 409 *
410 */ 410 */
411static void shpchp_pushbutton_thread(void *data) 411static void shpchp_pushbutton_thread(struct work_struct *work)
412{ 412{
413 struct pushbutton_work_info *info = data; 413 struct pushbutton_work_info *info =
414 container_of(work, struct pushbutton_work_info, work);
414 struct slot *p_slot = info->p_slot; 415 struct slot *p_slot = info->p_slot;
415 416
416 mutex_lock(&p_slot->lock); 417 mutex_lock(&p_slot->lock);
@@ -436,9 +437,9 @@ static void shpchp_pushbutton_thread(void *data)
436 kfree(info); 437 kfree(info);
437} 438}
438 439
439void queue_pushbutton_work(void *data) 440void queue_pushbutton_work(struct work_struct *work)
440{ 441{
441 struct slot *p_slot = data; 442 struct slot *p_slot = container_of(work, struct slot, work.work);
442 struct pushbutton_work_info *info; 443 struct pushbutton_work_info *info;
443 444
444 info = kmalloc(sizeof(*info), GFP_KERNEL); 445 info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -447,7 +448,7 @@ void queue_pushbutton_work(void *data)
447 return; 448 return;
448 } 449 }
449 info->p_slot = p_slot; 450 info->p_slot = p_slot;
450 INIT_WORK(&info->work, shpchp_pushbutton_thread, info); 451 INIT_WORK(&info->work, shpchp_pushbutton_thread);
451 452
452 mutex_lock(&p_slot->lock); 453 mutex_lock(&p_slot->lock);
453 switch (p_slot->state) { 454 switch (p_slot->state) {
@@ -541,9 +542,9 @@ static void handle_button_press_event(struct slot *p_slot)
541 } 542 }
542} 543}
543 544
544static void interrupt_event_handler(void *data) 545static void interrupt_event_handler(struct work_struct *work)
545{ 546{
546 struct event_info *info = data; 547 struct event_info *info = container_of(work, struct event_info, work);
547 struct slot *p_slot = info->p_slot; 548 struct slot *p_slot = info->p_slot;
548 549
549 mutex_lock(&p_slot->lock); 550 mutex_lock(&p_slot->lock);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 04c43ef529ac..55866b6b26fa 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -160,7 +160,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
160 rpc->e_lock = SPIN_LOCK_UNLOCKED; 160 rpc->e_lock = SPIN_LOCK_UNLOCKED;
161 161
162 rpc->rpd = dev; 162 rpc->rpd = dev;
163 INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev); 163 INIT_WORK(&rpc->dpc_handler, aer_isr);
164 rpc->prod_idx = rpc->cons_idx = 0; 164 rpc->prod_idx = rpc->cons_idx = 0;
165 mutex_init(&rpc->rpc_mutex); 165 mutex_init(&rpc->rpc_mutex);
166 init_waitqueue_head(&rpc->wait_release); 166 init_waitqueue_head(&rpc->wait_release);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index daf0cad88fc8..3c0a58f64dd8 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -118,7 +118,7 @@ extern struct bus_type pcie_port_bus_type;
118extern void aer_enable_rootport(struct aer_rpc *rpc); 118extern void aer_enable_rootport(struct aer_rpc *rpc);
119extern void aer_delete_rootport(struct aer_rpc *rpc); 119extern void aer_delete_rootport(struct aer_rpc *rpc);
120extern int aer_init(struct pcie_device *dev); 120extern int aer_init(struct pcie_device *dev);
121extern void aer_isr(void *context); 121extern void aer_isr(struct work_struct *work);
122extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 122extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
123extern int aer_osc_setup(struct pci_dev *dev); 123extern int aer_osc_setup(struct pci_dev *dev);
124 124
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 1c7e660d6535..08e13033ced8 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -690,14 +690,14 @@ static void aer_isr_one_error(struct pcie_device *p_device,
690 690
691/** 691/**
692 * aer_isr - consume errors detected by root port 692 * aer_isr - consume errors detected by root port
693 * @context: pointer to a private data of pcie device 693 * @work: definition of this work item
694 * 694 *
695 * Invoked, as DPC, when root port records new detected error 695 * Invoked, as DPC, when root port records new detected error
696 **/ 696 **/
697void aer_isr(void *context) 697void aer_isr(struct work_struct *work)
698{ 698{
699 struct pcie_device *p_device = (struct pcie_device *) context; 699 struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
700 struct aer_rpc *rpc = get_service_data(p_device); 700 struct pcie_device *p_device = rpc->rpd;
701 struct aer_err_source *e_src; 701 struct aer_err_source *e_src;
702 702
703 mutex_lock(&rpc->rpc_mutex); 703 mutex_lock(&rpc->rpc_mutex);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 21d83a895b21..ff14fd8f0cd1 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -698,9 +698,10 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
698} 698}
699 699
700 700
701static void pcmcia_delayed_add_pseudo_device(void *data) 701static void pcmcia_delayed_add_pseudo_device(struct work_struct *work)
702{ 702{
703 struct pcmcia_socket *s = data; 703 struct pcmcia_socket *s =
704 container_of(work, struct pcmcia_socket, device_add);
704 pcmcia_device_add(s, 0); 705 pcmcia_device_add(s, 0);
705 s->pcmcia_state.device_add_pending = 0; 706 s->pcmcia_state.device_add_pending = 0;
706} 707}
@@ -1246,7 +1247,7 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev,
1246 init_waitqueue_head(&socket->queue); 1247 init_waitqueue_head(&socket->queue);
1247#endif 1248#endif
1248 INIT_LIST_HEAD(&socket->devices_list); 1249 INIT_LIST_HEAD(&socket->devices_list);
1249 INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device, socket); 1250 INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device);
1250 memset(&socket->pcmcia_state, 0, sizeof(u8)); 1251 memset(&socket->pcmcia_state, 0, sizeof(u8));
1251 socket->device_count = 0; 1252 socket->device_count = 0;
1252 1253
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 814b9e1873f5..828b329e08e0 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -53,9 +53,10 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
53 * Routine to poll RTC seconds field for change as often as possible, 53 * Routine to poll RTC seconds field for change as often as possible,
54 * after first RTC_UIE use timer to reduce polling 54 * after first RTC_UIE use timer to reduce polling
55 */ 55 */
56static void rtc_uie_task(void *data) 56static void rtc_uie_task(struct work_struct *work)
57{ 57{
58 struct rtc_device *rtc = data; 58 struct rtc_device *rtc =
59 container_of(work, struct rtc_device, uie_task);
59 struct rtc_time tm; 60 struct rtc_time tm;
60 int num = 0; 61 int num = 0;
61 int err; 62 int err;
@@ -411,7 +412,7 @@ static int rtc_dev_add_device(struct class_device *class_dev,
411 spin_lock_init(&rtc->irq_lock); 412 spin_lock_init(&rtc->irq_lock);
412 init_waitqueue_head(&rtc->irq_queue); 413 init_waitqueue_head(&rtc->irq_queue);
413#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 414#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
414 INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc); 415 INIT_WORK(&rtc->uie_task, rtc_uie_task);
415 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); 416 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
416#endif 417#endif
417 418
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index a6aa91072880..bb3cb3360541 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -849,7 +849,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
849 hostdata->issue_queue = NULL; 849 hostdata->issue_queue = NULL;
850 hostdata->disconnected_queue = NULL; 850 hostdata->disconnected_queue = NULL;
851 851
852 INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata); 852 INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
853 853
854#ifdef NCR5380_STATS 854#ifdef NCR5380_STATS
855 for (i = 0; i < 8; ++i) { 855 for (i = 0; i < 8; ++i) {
@@ -1016,7 +1016,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1016 1016
1017 /* Run the coroutine if it isn't already running. */ 1017 /* Run the coroutine if it isn't already running. */
1018 /* Kick off command processing */ 1018 /* Kick off command processing */
1019 schedule_work(&hostdata->coroutine); 1019 schedule_delayed_work(&hostdata->coroutine, 0);
1020 return 0; 1020 return 0;
1021} 1021}
1022 1022
@@ -1033,9 +1033,10 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1033 * host lock and called routines may take the isa dma lock. 1033 * host lock and called routines may take the isa dma lock.
1034 */ 1034 */
1035 1035
1036static void NCR5380_main(void *p) 1036static void NCR5380_main(struct work_struct *work)
1037{ 1037{
1038 struct NCR5380_hostdata *hostdata = p; 1038 struct NCR5380_hostdata *hostdata =
1039 container_of(work, struct NCR5380_hostdata, coroutine.work);
1039 struct Scsi_Host *instance = hostdata->host; 1040 struct Scsi_Host *instance = hostdata->host;
1040 Scsi_Cmnd *tmp, *prev; 1041 Scsi_Cmnd *tmp, *prev;
1041 int done; 1042 int done;
@@ -1221,7 +1222,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1221 } /* if BASR_IRQ */ 1222 } /* if BASR_IRQ */
1222 spin_unlock_irqrestore(instance->host_lock, flags); 1223 spin_unlock_irqrestore(instance->host_lock, flags);
1223 if(!done) 1224 if(!done)
1224 schedule_work(&hostdata->coroutine); 1225 schedule_delayed_work(&hostdata->coroutine, 0);
1225 } while (!done); 1226 } while (!done);
1226 return IRQ_HANDLED; 1227 return IRQ_HANDLED;
1227} 1228}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 1bc73de496b0..713a108c02ef 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -271,7 +271,7 @@ struct NCR5380_hostdata {
271 unsigned long time_expires; /* in jiffies, set prior to sleeping */ 271 unsigned long time_expires; /* in jiffies, set prior to sleeping */
272 int select_time; /* timer in select for target response */ 272 int select_time; /* timer in select for target response */
273 volatile Scsi_Cmnd *selecting; 273 volatile Scsi_Cmnd *selecting;
274 struct work_struct coroutine; /* our co-routine */ 274 struct delayed_work coroutine; /* our co-routine */
275#ifdef NCR5380_STATS 275#ifdef NCR5380_STATS
276 unsigned timebase; /* Base for time calcs */ 276 unsigned timebase; /* Base for time calcs */
277 long time_read[8]; /* time to do reads */ 277 long time_read[8]; /* time to do reads */
@@ -298,7 +298,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
298#ifndef DONT_USE_INTR 298#ifndef DONT_USE_INTR
299static irqreturn_t NCR5380_intr(int irq, void *dev_id); 299static irqreturn_t NCR5380_intr(int irq, void *dev_id);
300#endif 300#endif
301static void NCR5380_main(void *ptr); 301static void NCR5380_main(struct work_struct *work);
302static void NCR5380_print_options(struct Scsi_Host *instance); 302static void NCR5380_print_options(struct Scsi_Host *instance);
303#ifdef NDEBUG 303#ifdef NDEBUG
304static void NCR5380_print_phase(struct Scsi_Host *instance); 304static void NCR5380_print_phase(struct Scsi_Host *instance);
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 306f46b85a55..0cec742d12e9 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1443,7 +1443,7 @@ static struct work_struct aha152x_tq;
1443 * Run service completions on the card with interrupts enabled. 1443 * Run service completions on the card with interrupts enabled.
1444 * 1444 *
1445 */ 1445 */
1446static void run(void) 1446static void run(struct work_struct *work)
1447{ 1447{
1448 struct aha152x_hostdata *hd; 1448 struct aha152x_hostdata *hd;
1449 1449
@@ -1499,7 +1499,7 @@ static irqreturn_t intr(int irqno, void *dev_id)
1499 HOSTDATA(shpnt)->service=1; 1499 HOSTDATA(shpnt)->service=1;
1500 1500
1501 /* Poke the BH handler */ 1501 /* Poke the BH handler */
1502 INIT_WORK(&aha152x_tq, (void *) run, NULL); 1502 INIT_WORK(&aha152x_tq, run);
1503 schedule_work(&aha152x_tq); 1503 schedule_work(&aha152x_tq);
1504 } 1504 }
1505 DO_UNLOCK(flags); 1505 DO_UNLOCK(flags);
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index e31f6122106f..0464c182c577 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -36,7 +36,7 @@ typedef struct {
36 int base_hi; /* Hi Base address for ECP-ISA chipset */ 36 int base_hi; /* Hi Base address for ECP-ISA chipset */
37 int mode; /* Transfer mode */ 37 int mode; /* Transfer mode */
38 struct scsi_cmnd *cur_cmd; /* Current queued command */ 38 struct scsi_cmnd *cur_cmd; /* Current queued command */
39 struct work_struct imm_tq; /* Polling interrupt stuff */ 39 struct delayed_work imm_tq; /* Polling interrupt stuff */
40 unsigned long jstart; /* Jiffies at start */ 40 unsigned long jstart; /* Jiffies at start */
41 unsigned failed:1; /* Failure flag */ 41 unsigned failed:1; /* Failure flag */
42 unsigned dp:1; /* Data phase present */ 42 unsigned dp:1; /* Data phase present */
@@ -733,9 +733,9 @@ static int imm_completion(struct scsi_cmnd *cmd)
733 * the scheduler's task queue to generate a stream of call-backs and 733 * the scheduler's task queue to generate a stream of call-backs and
734 * complete the request when the drive is ready. 734 * complete the request when the drive is ready.
735 */ 735 */
736static void imm_interrupt(void *data) 736static void imm_interrupt(struct work_struct *work)
737{ 737{
738 imm_struct *dev = (imm_struct *) data; 738 imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
739 struct scsi_cmnd *cmd = dev->cur_cmd; 739 struct scsi_cmnd *cmd = dev->cur_cmd;
740 struct Scsi_Host *host = cmd->device->host; 740 struct Scsi_Host *host = cmd->device->host;
741 unsigned long flags; 741 unsigned long flags;
@@ -745,7 +745,6 @@ static void imm_interrupt(void *data)
745 return; 745 return;
746 } 746 }
747 if (imm_engine(dev, cmd)) { 747 if (imm_engine(dev, cmd)) {
748 INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev);
749 schedule_delayed_work(&dev->imm_tq, 1); 748 schedule_delayed_work(&dev->imm_tq, 1);
750 return; 749 return;
751 } 750 }
@@ -953,8 +952,7 @@ static int imm_queuecommand(struct scsi_cmnd *cmd,
953 cmd->result = DID_ERROR << 16; /* default return code */ 952 cmd->result = DID_ERROR << 16; /* default return code */
954 cmd->SCp.phase = 0; /* bus free */ 953 cmd->SCp.phase = 0; /* bus free */
955 954
956 INIT_WORK(&dev->imm_tq, imm_interrupt, dev); 955 schedule_delayed_work(&dev->imm_tq, 0);
957 schedule_work(&dev->imm_tq);
958 956
959 imm_pb_claim(dev); 957 imm_pb_claim(dev);
960 958
@@ -1225,7 +1223,7 @@ static int __imm_attach(struct parport *pb)
1225 else 1223 else
1226 ports = 8; 1224 ports = 8;
1227 1225
1228 INIT_WORK(&dev->imm_tq, imm_interrupt, dev); 1226 INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
1229 1227
1230 err = -ENOMEM; 1228 err = -ENOMEM;
1231 host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); 1229 host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
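
The imm hunks make the motivation for delayed_work explicit: the polling handler re-queues itself with a one-jiffy delay, queuecommand queues it with no delay, and because INIT_DELAYED_WORK() now happens once at attach time, the old habit of re-running INIT_WORK() before every schedule goes away. A sketch of that initialize-once, poll-until-done pattern (all names and the example_done() helper are invented):

#include <linux/types.h>
#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work poll_work;	/* previously re-initialized per use */
};

static bool example_done(struct example_dev *dev)
{
	return true;	/* placeholder for the real completion test */
}

static void example_poll(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, poll_work.work);

	if (!example_done(dev)) {
		/* Not finished yet: poll again one tick later. */
		schedule_delayed_work(&dev->poll_work, 1);
		return;
	}
	/* ... complete the request ... */
}

static void example_attach(struct example_dev *dev)
{
	/* Initialize exactly once; call sites only (re)schedule. */
	INIT_DELAYED_WORK(&dev->poll_work, example_poll);
}

static void example_queuecommand(struct example_dev *dev)
{
	schedule_delayed_work(&dev->poll_work, 0);	/* run as soon as possible */
}
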
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2dde821025f3..d51c3e764bb0 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2093,7 +2093,7 @@ static void ipr_release_dump(struct kref *kref)
2093 2093
2094/** 2094/**
2095 * ipr_worker_thread - Worker thread 2095 * ipr_worker_thread - Worker thread
2096 * @data: ioa config struct 2096 * @work: ioa config struct
2097 * 2097 *
2098 * Called at task level from a work thread. This function takes care 2098 * Called at task level from a work thread. This function takes care
2099 * of adding and removing device from the mid-layer as configuration 2099 * of adding and removing device from the mid-layer as configuration
@@ -2102,13 +2102,14 @@ static void ipr_release_dump(struct kref *kref)
2102 * Return value: 2102 * Return value:
2103 * nothing 2103 * nothing
2104 **/ 2104 **/
2105static void ipr_worker_thread(void *data) 2105static void ipr_worker_thread(struct work_struct *work)
2106{ 2106{
2107 unsigned long lock_flags; 2107 unsigned long lock_flags;
2108 struct ipr_resource_entry *res; 2108 struct ipr_resource_entry *res;
2109 struct scsi_device *sdev; 2109 struct scsi_device *sdev;
2110 struct ipr_dump *dump; 2110 struct ipr_dump *dump;
2111 struct ipr_ioa_cfg *ioa_cfg = data; 2111 struct ipr_ioa_cfg *ioa_cfg =
2112 container_of(work, struct ipr_ioa_cfg, work_q);
2112 u8 bus, target, lun; 2113 u8 bus, target, lun;
2113 int did_work; 2114 int did_work;
2114 2115
@@ -6926,7 +6927,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6926 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); 6927 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6927 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 6928 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6928 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 6929 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6929 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); 6930 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
6930 init_waitqueue_head(&ioa_cfg->reset_wait_q); 6931 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6931 ioa_cfg->sdt_state = INACTIVE; 6932 ioa_cfg->sdt_state = INACTIVE;
6932 if (ipr_enable_cache) 6933 if (ipr_enable_cache)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5d8862189485..e11b23c641e2 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -719,9 +719,10 @@ again:
719 return rc; 719 return rc;
720} 720}
721 721
722static void iscsi_xmitworker(void *data) 722static void iscsi_xmitworker(struct work_struct *work)
723{ 723{
724 struct iscsi_conn *conn = data; 724 struct iscsi_conn *conn =
725 container_of(work, struct iscsi_conn, xmitwork);
725 int rc; 726 int rc;
726 /* 727 /*
727 * serialize Xmit worker on a per-connection basis. 728 * serialize Xmit worker on a per-connection basis.
@@ -1512,7 +1513,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1512 if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) 1513 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
1513 goto mgmtqueue_alloc_fail; 1514 goto mgmtqueue_alloc_fail;
1514 1515
1515 INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn); 1516 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
1516 1517
1517 /* allocate login_mtask used for the login/text sequences */ 1518 /* allocate login_mtask used for the login/text sequences */
1518 spin_lock_bh(&session->lock); 1519 spin_lock_bh(&session->lock);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d977bd492d8d..fb7df7b75811 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -647,10 +647,12 @@ void sas_unregister_domain_devices(struct asd_sas_port *port)
647 * Discover process only interrogates devices in order to discover the 647 * Discover process only interrogates devices in order to discover the
648 * domain. 648 * domain.
649 */ 649 */
650static void sas_discover_domain(void *data) 650static void sas_discover_domain(struct work_struct *work)
651{ 651{
652 int error = 0; 652 int error = 0;
653 struct asd_sas_port *port = data; 653 struct sas_discovery_event *ev =
654 container_of(work, struct sas_discovery_event, work);
655 struct asd_sas_port *port = ev->port;
654 656
655 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, 657 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
656 &port->disc.pending); 658 &port->disc.pending);
@@ -692,10 +694,12 @@ static void sas_discover_domain(void *data)
692 current->pid, error); 694 current->pid, error);
693} 695}
694 696
695static void sas_revalidate_domain(void *data) 697static void sas_revalidate_domain(struct work_struct *work)
696{ 698{
697 int res = 0; 699 int res = 0;
698 struct asd_sas_port *port = data; 700 struct sas_discovery_event *ev =
701 container_of(work, struct sas_discovery_event, work);
702 struct asd_sas_port *port = ev->port;
699 703
700 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, 704 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
701 &port->disc.pending); 705 &port->disc.pending);
@@ -722,7 +726,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
722 BUG_ON(ev >= DISC_NUM_EVENTS); 726 BUG_ON(ev >= DISC_NUM_EVENTS);
723 727
724 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, 728 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
725 &disc->disc_work[ev], port->ha->core.shost); 729 &disc->disc_work[ev].work, port->ha->core.shost);
726 730
727 return 0; 731 return 0;
728} 732}
@@ -737,13 +741,15 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
737{ 741{
738 int i; 742 int i;
739 743
740 static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { 744 static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
741 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, 745 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
742 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, 746 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
743 }; 747 };
744 748
745 spin_lock_init(&disc->disc_event_lock); 749 spin_lock_init(&disc->disc_event_lock);
746 disc->pending = 0; 750 disc->pending = 0;
747 for (i = 0; i < DISC_NUM_EVENTS; i++) 751 for (i = 0; i < DISC_NUM_EVENTS; i++) {
748 INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); 752 INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
753 disc->disc_work[i].port = port;
754 }
749} 755}
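
With the data cookie gone, libsas can no longer pass the port through INIT_WORK(), so the patch wraps each work_struct in a small event structure (sas_discovery_event, sas_ha_event, asd_sas_event) that carries the owning object, and the handler tables become arrays of work_func_t. A sketch of that wrapper approach with hypothetical names:

#include <linux/workqueue.h>

struct example_port;

struct example_disc_event {
	struct work_struct work;
	struct example_port *port;	/* replaces the old data cookie */
};

#define EXAMPLE_NUM_EVENTS	2

struct example_port {
	struct example_disc_event disc_work[EXAMPLE_NUM_EVENTS];
};

static void example_discover_domain(struct work_struct *work)
{
	struct example_disc_event *ev =
		container_of(work, struct example_disc_event, work);
	struct example_port *port = ev->port;

	/* ... interrogate the devices behind port ... */
	(void)port;
}

static void example_revalidate_domain(struct work_struct *work)
{
	/* same container_of() recovery as above */
}

static void example_init_disc(struct example_port *port)
{
	static const work_func_t fns[EXAMPLE_NUM_EVENTS] = {
		example_discover_domain,
		example_revalidate_domain,
	};
	int i;

	for (i = 0; i < EXAMPLE_NUM_EVENTS; i++) {
		INIT_WORK(&port->disc_work[i].work, fns[i]);
		port->disc_work[i].port = port;
	}
}
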
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 19110ed1c89c..d83392ee6823 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
31 BUG_ON(event >= HA_NUM_EVENTS); 31 BUG_ON(event >= HA_NUM_EVENTS);
32 32
33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, 33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
34 &sas_ha->ha_events[event], sas_ha->core.shost); 34 &sas_ha->ha_events[event].work, sas_ha->core.shost);
35} 35}
36 36
37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) 37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
41 BUG_ON(event >= PORT_NUM_EVENTS); 41 BUG_ON(event >= PORT_NUM_EVENTS);
42 42
43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, 43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
44 &phy->port_events[event], ha->core.shost); 44 &phy->port_events[event].work, ha->core.shost);
45} 45}
46 46
47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) 47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@@ -51,12 +51,12 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
51 BUG_ON(event >= PHY_NUM_EVENTS); 51 BUG_ON(event >= PHY_NUM_EVENTS);
52 52
53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, 53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
54 &phy->phy_events[event], ha->core.shost); 54 &phy->phy_events[event].work, ha->core.shost);
55} 55}
56 56
57int sas_init_events(struct sas_ha_struct *sas_ha) 57int sas_init_events(struct sas_ha_struct *sas_ha)
58{ 58{
59 static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { 59 static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
60 [HAE_RESET] = sas_hae_reset, 60 [HAE_RESET] = sas_hae_reset,
61 }; 61 };
62 62
@@ -64,8 +64,10 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
64 64
65 spin_lock_init(&sas_ha->event_lock); 65 spin_lock_init(&sas_ha->event_lock);
66 66
67 for (i = 0; i < HA_NUM_EVENTS; i++) 67 for (i = 0; i < HA_NUM_EVENTS; i++) {
68 INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); 68 INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
69 sas_ha->ha_events[i].ha = sas_ha;
70 }
69 71
70 sas_ha->notify_ha_event = notify_ha_event; 72 sas_ha->notify_ha_event = notify_ha_event;
71 sas_ha->notify_port_event = notify_port_event; 73 sas_ha->notify_port_event = notify_port_event;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index c836a237fb79..7b4e9169f44d 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -65,9 +65,11 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
65 65
66/* ---------- HA events ---------- */ 66/* ---------- HA events ---------- */
67 67
68void sas_hae_reset(void *data) 68void sas_hae_reset(struct work_struct *work)
69{ 69{
70 struct sas_ha_struct *ha = data; 70 struct sas_ha_event *ev =
71 container_of(work, struct sas_ha_event, work);
72 struct sas_ha_struct *ha = ev->ha;
71 73
72 sas_begin_event(HAE_RESET, &ha->event_lock, 74 sas_begin_event(HAE_RESET, &ha->event_lock,
73 &ha->pending); 75 &ha->pending);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index bffcee474921..137d7e496b6d 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -60,11 +60,11 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
60 60
61void sas_deform_port(struct asd_sas_phy *phy); 61void sas_deform_port(struct asd_sas_phy *phy);
62 62
63void sas_porte_bytes_dmaed(void *); 63void sas_porte_bytes_dmaed(struct work_struct *work);
64void sas_porte_broadcast_rcvd(void *); 64void sas_porte_broadcast_rcvd(struct work_struct *work);
65void sas_porte_link_reset_err(void *); 65void sas_porte_link_reset_err(struct work_struct *work);
66void sas_porte_timer_event(void *); 66void sas_porte_timer_event(struct work_struct *work);
67void sas_porte_hard_reset(void *); 67void sas_porte_hard_reset(struct work_struct *work);
68 68
69int sas_notify_lldd_dev_found(struct domain_device *); 69int sas_notify_lldd_dev_found(struct domain_device *);
70void sas_notify_lldd_dev_gone(struct domain_device *); 70void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -75,7 +75,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy);
75 75
76struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); 76struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
77 77
78void sas_hae_reset(void *); 78void sas_hae_reset(struct work_struct *work);
79 79
80static inline void sas_queue_event(int event, spinlock_t *lock, 80static inline void sas_queue_event(int event, spinlock_t *lock,
81 unsigned long *pending, 81 unsigned long *pending,
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 9340cdbae4a3..b459c4b635b1 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -30,9 +30,11 @@
30 30
31/* ---------- Phy events ---------- */ 31/* ---------- Phy events ---------- */
32 32
33static void sas_phye_loss_of_signal(void *data) 33static void sas_phye_loss_of_signal(struct work_struct *work)
34{ 34{
35 struct asd_sas_phy *phy = data; 35 struct asd_sas_event *ev =
36 container_of(work, struct asd_sas_event, work);
37 struct asd_sas_phy *phy = ev->phy;
36 38
37 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, 39 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
38 &phy->phy_events_pending); 40 &phy->phy_events_pending);
@@ -40,18 +42,22 @@ static void sas_phye_loss_of_signal(void *data)
40 sas_deform_port(phy); 42 sas_deform_port(phy);
41} 43}
42 44
43static void sas_phye_oob_done(void *data) 45static void sas_phye_oob_done(struct work_struct *work)
44{ 46{
45 struct asd_sas_phy *phy = data; 47 struct asd_sas_event *ev =
48 container_of(work, struct asd_sas_event, work);
49 struct asd_sas_phy *phy = ev->phy;
46 50
47 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, 51 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
48 &phy->phy_events_pending); 52 &phy->phy_events_pending);
49 phy->error = 0; 53 phy->error = 0;
50} 54}
51 55
52static void sas_phye_oob_error(void *data) 56static void sas_phye_oob_error(struct work_struct *work)
53{ 57{
54 struct asd_sas_phy *phy = data; 58 struct asd_sas_event *ev =
59 container_of(work, struct asd_sas_event, work);
60 struct asd_sas_phy *phy = ev->phy;
55 struct sas_ha_struct *sas_ha = phy->ha; 61 struct sas_ha_struct *sas_ha = phy->ha;
56 struct asd_sas_port *port = phy->port; 62 struct asd_sas_port *port = phy->port;
57 struct sas_internal *i = 63 struct sas_internal *i =
@@ -80,9 +86,11 @@ static void sas_phye_oob_error(void *data)
80 } 86 }
81} 87}
82 88
83static void sas_phye_spinup_hold(void *data) 89static void sas_phye_spinup_hold(struct work_struct *work)
84{ 90{
85 struct asd_sas_phy *phy = data; 91 struct asd_sas_event *ev =
92 container_of(work, struct asd_sas_event, work);
93 struct asd_sas_phy *phy = ev->phy;
86 struct sas_ha_struct *sas_ha = phy->ha; 94 struct sas_ha_struct *sas_ha = phy->ha;
87 struct sas_internal *i = 95 struct sas_internal *i =
88 to_sas_internal(sas_ha->core.shost->transportt); 96 to_sas_internal(sas_ha->core.shost->transportt);
@@ -100,14 +108,14 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
100{ 108{
101 int i; 109 int i;
102 110
103 static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { 111 static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
104 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, 112 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
105 [PHYE_OOB_DONE] = sas_phye_oob_done, 113 [PHYE_OOB_DONE] = sas_phye_oob_done,
106 [PHYE_OOB_ERROR] = sas_phye_oob_error, 114 [PHYE_OOB_ERROR] = sas_phye_oob_error,
107 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, 115 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
108 }; 116 };
109 117
110 static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { 118 static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
111 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, 119 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
112 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, 120 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
113 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, 121 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
@@ -122,13 +130,18 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
122 130
123 phy->error = 0; 131 phy->error = 0;
124 INIT_LIST_HEAD(&phy->port_phy_el); 132 INIT_LIST_HEAD(&phy->port_phy_el);
125 for (k = 0; k < PORT_NUM_EVENTS; k++) 133 for (k = 0; k < PORT_NUM_EVENTS; k++) {
126 INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], 134 INIT_WORK(&phy->port_events[k].work,
127 phy); 135 sas_port_event_fns[k]);
136 phy->port_events[k].phy = phy;
137 }
138
139 for (k = 0; k < PHY_NUM_EVENTS; k++) {
140 INIT_WORK(&phy->phy_events[k].work,
141 sas_phy_event_fns[k]);
142 phy->phy_events[k].phy = phy;
143 }
128 144
129 for (k = 0; k < PHY_NUM_EVENTS; k++)
130 INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
131 phy);
132 phy->port = NULL; 145 phy->port = NULL;
133 phy->ha = sas_ha; 146 phy->ha = sas_ha;
134 spin_lock_init(&phy->frame_rcvd_lock); 147 spin_lock_init(&phy->frame_rcvd_lock);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 253cdcf306a2..971c37ceecb4 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -181,9 +181,11 @@ void sas_deform_port(struct asd_sas_phy *phy)
181 181
182/* ---------- SAS port events ---------- */ 182/* ---------- SAS port events ---------- */
183 183
184void sas_porte_bytes_dmaed(void *data) 184void sas_porte_bytes_dmaed(struct work_struct *work)
185{ 185{
186 struct asd_sas_phy *phy = data; 186 struct asd_sas_event *ev =
187 container_of(work, struct asd_sas_event, work);
188 struct asd_sas_phy *phy = ev->phy;
187 189
188 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, 190 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
189 &phy->port_events_pending); 191 &phy->port_events_pending);
@@ -191,11 +193,13 @@ void sas_porte_bytes_dmaed(void *data)
191 sas_form_port(phy); 193 sas_form_port(phy);
192} 194}
193 195
194void sas_porte_broadcast_rcvd(void *data) 196void sas_porte_broadcast_rcvd(struct work_struct *work)
195{ 197{
198 struct asd_sas_event *ev =
199 container_of(work, struct asd_sas_event, work);
200 struct asd_sas_phy *phy = ev->phy;
196 unsigned long flags; 201 unsigned long flags;
197 u32 prim; 202 u32 prim;
198 struct asd_sas_phy *phy = data;
199 203
200 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, 204 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
201 &phy->port_events_pending); 205 &phy->port_events_pending);
@@ -208,9 +212,11 @@ void sas_porte_broadcast_rcvd(void *data)
208 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); 212 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
209} 213}
210 214
211void sas_porte_link_reset_err(void *data) 215void sas_porte_link_reset_err(struct work_struct *work)
212{ 216{
213 struct asd_sas_phy *phy = data; 217 struct asd_sas_event *ev =
218 container_of(work, struct asd_sas_event, work);
219 struct asd_sas_phy *phy = ev->phy;
214 220
215 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, 221 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
216 &phy->port_events_pending); 222 &phy->port_events_pending);
@@ -218,9 +224,11 @@ void sas_porte_link_reset_err(void *data)
218 sas_deform_port(phy); 224 sas_deform_port(phy);
219} 225}
220 226
221void sas_porte_timer_event(void *data) 227void sas_porte_timer_event(struct work_struct *work)
222{ 228{
223 struct asd_sas_phy *phy = data; 229 struct asd_sas_event *ev =
230 container_of(work, struct asd_sas_event, work);
231 struct asd_sas_phy *phy = ev->phy;
224 232
225 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, 233 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
226 &phy->port_events_pending); 234 &phy->port_events_pending);
@@ -228,9 +236,11 @@ void sas_porte_timer_event(void *data)
228 sas_deform_port(phy); 236 sas_deform_port(phy);
229} 237}
230 238
231void sas_porte_hard_reset(void *data) 239void sas_porte_hard_reset(struct work_struct *work)
232{ 240{
233 struct asd_sas_phy *phy = data; 241 struct asd_sas_event *ev =
242 container_of(work, struct asd_sas_event, work);
243 struct asd_sas_phy *phy = ev->phy;
234 244
235 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, 245 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
236 &phy->port_events_pending); 246 &phy->port_events_pending);
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 89a2a9f11e41..584ba4d6e038 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -31,7 +31,7 @@ typedef struct {
31 int base; /* Actual port address */ 31 int base; /* Actual port address */
32 int mode; /* Transfer mode */ 32 int mode; /* Transfer mode */
33 struct scsi_cmnd *cur_cmd; /* Current queued command */ 33 struct scsi_cmnd *cur_cmd; /* Current queued command */
34 struct work_struct ppa_tq; /* Polling interrupt stuff */ 34 struct delayed_work ppa_tq; /* Polling interrupt stuff */
35 unsigned long jstart; /* Jiffies at start */ 35 unsigned long jstart; /* Jiffies at start */
36 unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ 36 unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */
37 unsigned int failed:1; /* Failure flag */ 37 unsigned int failed:1; /* Failure flag */
@@ -627,9 +627,9 @@ static int ppa_completion(struct scsi_cmnd *cmd)
627 * the scheduler's task queue to generate a stream of call-backs and 627 * the scheduler's task queue to generate a stream of call-backs and
628 * complete the request when the drive is ready. 628 * complete the request when the drive is ready.
629 */ 629 */
630static void ppa_interrupt(void *data) 630static void ppa_interrupt(struct work_struct *work)
631{ 631{
632 ppa_struct *dev = (ppa_struct *) data; 632 ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
633 struct scsi_cmnd *cmd = dev->cur_cmd; 633 struct scsi_cmnd *cmd = dev->cur_cmd;
634 634
635 if (!cmd) { 635 if (!cmd) {
@@ -637,7 +637,6 @@ static void ppa_interrupt(void *data)
637 return; 637 return;
638 } 638 }
639 if (ppa_engine(dev, cmd)) { 639 if (ppa_engine(dev, cmd)) {
640 dev->ppa_tq.data = (void *) dev;
641 schedule_delayed_work(&dev->ppa_tq, 1); 640 schedule_delayed_work(&dev->ppa_tq, 1);
642 return; 641 return;
643 } 642 }
@@ -822,8 +821,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd,
822 cmd->result = DID_ERROR << 16; /* default return code */ 821 cmd->result = DID_ERROR << 16; /* default return code */
823 cmd->SCp.phase = 0; /* bus free */ 822 cmd->SCp.phase = 0; /* bus free */
824 823
825 dev->ppa_tq.data = dev; 824 schedule_delayed_work(&dev->ppa_tq, 0);
826 schedule_work(&dev->ppa_tq);
827 825
828 ppa_pb_claim(dev); 826 ppa_pb_claim(dev);
829 827
@@ -1086,7 +1084,7 @@ static int __ppa_attach(struct parport *pb)
1086 else 1084 else
1087 ports = 8; 1085 ports = 8;
1088 1086
1089 INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev); 1087 INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
1090 1088
1091 err = -ENOMEM; 1089 err = -ENOMEM;
1092 host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); 1090 host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
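For work that is scheduled with a delay, such as ppa's polling routine, the embedded work_struct becomes a struct delayed_work and the handler reaches the container through the .work member; queueing with a delay of 0 asks for immediate execution. A hedged sketch of that variant with invented names (struct my_dev, my_poll):

    #include <linux/workqueue.h>

    struct my_dev {
            struct delayed_work poll;       /* was a plain work_struct before the rework */
            int busy;
    };

    static void my_poll(struct work_struct *work)
    {
            /* the callback is handed &poll.work, so go through .work */
            struct my_dev *dev = container_of(work, struct my_dev, poll.work);

            if (dev->busy)
                    schedule_delayed_work(&dev->poll, 1);   /* poll again in one jiffy */
    }

    static void my_dev_init(struct my_dev *dev)
    {
            dev->busy = 0;
            INIT_DELAYED_WORK(&dev->poll, my_poll);
            schedule_delayed_work(&dev->poll, 0);           /* 0 == run as soon as possible */
    }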
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5b8db6109536..bbbc9d039baa 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1011,9 +1011,10 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
1011 * the mid-level tries to sleep when it reaches the driver threshold 1011 * the mid-level tries to sleep when it reaches the driver threshold
1012 * "host->can_queue". This can cause a panic if we were in our interrupt code. 1012 * "host->can_queue". This can cause a panic if we were in our interrupt code.
1013 **/ 1013 **/
1014static void qla4xxx_do_dpc(void *data) 1014static void qla4xxx_do_dpc(struct work_struct *work)
1015{ 1015{
1016 struct scsi_qla_host *ha = (struct scsi_qla_host *) data; 1016 struct scsi_qla_host *ha =
1017 container_of(work, struct scsi_qla_host, dpc_work);
1017 struct ddb_entry *ddb_entry, *dtemp; 1018 struct ddb_entry *ddb_entry, *dtemp;
1018 1019
1019 DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n", 1020 DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n",
@@ -1315,7 +1316,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1315 ret = -ENODEV; 1316 ret = -ENODEV;
1316 goto probe_failed; 1317 goto probe_failed;
1317 } 1318 }
1318 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha); 1319 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
1319 1320
1320 ret = request_irq(pdev->irq, qla4xxx_intr_handler, 1321 ret = request_irq(pdev->irq, qla4xxx_intr_handler,
1321 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha); 1322 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 94a274645f6f..d3c5e964c964 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -362,9 +362,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
362 goto retry; 362 goto retry;
363} 363}
364 364
365static void scsi_target_reap_usercontext(void *data) 365static void scsi_target_reap_usercontext(struct work_struct *work)
366{ 366{
367 struct scsi_target *starget = data; 367 struct scsi_target *starget =
368 container_of(work, struct scsi_target, ew.work);
368 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 369 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
369 unsigned long flags; 370 unsigned long flags;
370 371
@@ -400,7 +401,7 @@ void scsi_target_reap(struct scsi_target *starget)
400 starget->state = STARGET_DEL; 401 starget->state = STARGET_DEL;
401 spin_unlock_irqrestore(shost->host_lock, flags); 402 spin_unlock_irqrestore(shost->host_lock, flags);
402 execute_in_process_context(scsi_target_reap_usercontext, 403 execute_in_process_context(scsi_target_reap_usercontext,
403 starget, &starget->ew); 404 &starget->ew);
404 return; 405 return;
405 406
406 } 407 }
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e1a91665d1c2..259c90cfa367 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -218,16 +218,16 @@ static void scsi_device_cls_release(struct class_device *class_dev)
218 put_device(&sdev->sdev_gendev); 218 put_device(&sdev->sdev_gendev);
219} 219}
220 220
221static void scsi_device_dev_release_usercontext(void *data) 221static void scsi_device_dev_release_usercontext(struct work_struct *work)
222{ 222{
223 struct device *dev = data;
224 struct scsi_device *sdev; 223 struct scsi_device *sdev;
225 struct device *parent; 224 struct device *parent;
226 struct scsi_target *starget; 225 struct scsi_target *starget;
227 unsigned long flags; 226 unsigned long flags;
228 227
229 parent = dev->parent; 228 sdev = container_of(work, struct scsi_device, ew.work);
230 sdev = to_scsi_device(dev); 229
230 parent = sdev->sdev_gendev.parent;
231 starget = to_scsi_target(parent); 231 starget = to_scsi_target(parent);
232 232
233 spin_lock_irqsave(sdev->host->host_lock, flags); 233 spin_lock_irqsave(sdev->host->host_lock, flags);
@@ -258,7 +258,7 @@ static void scsi_device_dev_release_usercontext(void *data)
258static void scsi_device_dev_release(struct device *dev) 258static void scsi_device_dev_release(struct device *dev)
259{ 259{
260 struct scsi_device *sdp = to_scsi_device(dev); 260 struct scsi_device *sdp = to_scsi_device(dev);
261 execute_in_process_context(scsi_device_dev_release_usercontext, dev, 261 execute_in_process_context(scsi_device_dev_release_usercontext,
262 &sdp->ew); 262 &sdp->ew);
263} 263}
264 264
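The scsi_target_reap() and scsi_device_dev_release() changes apply the same idea to execute_in_process_context(): the callback is now a work_func_t, the separate data pointer is dropped, and the object is found from the struct execute_work embedded in it (the ew.work member used in the container_of() calls above). Roughly, with an invented struct my_obj standing in for scsi_target/scsi_device:

    #include <linux/workqueue.h>

    struct my_obj {
            struct execute_work ew;         /* provides the work_struct at ew.work */
            /* ... other fields ... */
    };

    static void my_release_usercontext(struct work_struct *work)
    {
            struct my_obj *obj = container_of(work, struct my_obj, ew.work);

            /* ... teardown that must run in process context ... */
            (void)obj;
    }

    static void my_release(struct my_obj *obj)
    {
            /* old form: execute_in_process_context(fn, obj, &obj->ew); */
            execute_in_process_context(my_release_usercontext, &obj->ew);
    }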
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 38c215a78f69..3571ce8934e7 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -241,9 +241,9 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
241#define FC_MGMTSRVR_PORTID 0x00000a 241#define FC_MGMTSRVR_PORTID 0x00000a
242 242
243 243
244static void fc_timeout_deleted_rport(void *data); 244static void fc_timeout_deleted_rport(struct work_struct *work);
245static void fc_timeout_fail_rport_io(void *data); 245static void fc_timeout_fail_rport_io(struct work_struct *work);
246static void fc_scsi_scan_rport(void *data); 246static void fc_scsi_scan_rport(struct work_struct *work);
247 247
248/* 248/*
249 * Attribute counts pre object type... 249 * Attribute counts pre object type...
@@ -1613,7 +1613,7 @@ fc_flush_work(struct Scsi_Host *shost)
1613 * 1 on success / 0 already queued / < 0 for error 1613 * 1 on success / 0 already queued / < 0 for error
1614 **/ 1614 **/
1615static int 1615static int
1616fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, 1616fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
1617 unsigned long delay) 1617 unsigned long delay)
1618{ 1618{
1619 if (unlikely(!fc_host_devloss_work_q(shost))) { 1619 if (unlikely(!fc_host_devloss_work_q(shost))) {
@@ -1625,9 +1625,6 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
1625 return -EINVAL; 1625 return -EINVAL;
1626 } 1626 }
1627 1627
1628 if (delay == 0)
1629 return queue_work(fc_host_devloss_work_q(shost), work);
1630
1631 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); 1628 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
1632} 1629}
1633 1630
@@ -1712,12 +1709,13 @@ EXPORT_SYMBOL(fc_remove_host);
1712 * fc_starget_delete - called to delete the scsi decendents of an rport 1709 * fc_starget_delete - called to delete the scsi decendents of an rport
1713 * (target and all sdevs) 1710 * (target and all sdevs)
1714 * 1711 *
1715 * @data: remote port to be operated on. 1712 * @work: remote port to be operated on.
1716 **/ 1713 **/
1717static void 1714static void
1718fc_starget_delete(void *data) 1715fc_starget_delete(struct work_struct *work)
1719{ 1716{
1720 struct fc_rport *rport = (struct fc_rport *)data; 1717 struct fc_rport *rport =
1718 container_of(work, struct fc_rport, stgt_delete_work);
1721 struct Scsi_Host *shost = rport_to_shost(rport); 1719 struct Scsi_Host *shost = rport_to_shost(rport);
1722 unsigned long flags; 1720 unsigned long flags;
1723 struct fc_internal *i = to_fc_internal(shost->transportt); 1721 struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1751,12 +1749,13 @@ fc_starget_delete(void *data)
1751/** 1749/**
1752 * fc_rport_final_delete - finish rport termination and delete it. 1750 * fc_rport_final_delete - finish rport termination and delete it.
1753 * 1751 *
1754 * @data: remote port to be deleted. 1752 * @work: remote port to be deleted.
1755 **/ 1753 **/
1756static void 1754static void
1757fc_rport_final_delete(void *data) 1755fc_rport_final_delete(struct work_struct *work)
1758{ 1756{
1759 struct fc_rport *rport = (struct fc_rport *)data; 1757 struct fc_rport *rport =
1758 container_of(work, struct fc_rport, rport_delete_work);
1760 struct device *dev = &rport->dev; 1759 struct device *dev = &rport->dev;
1761 struct Scsi_Host *shost = rport_to_shost(rport); 1760 struct Scsi_Host *shost = rport_to_shost(rport);
1762 struct fc_internal *i = to_fc_internal(shost->transportt); 1761 struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1770,7 +1769,7 @@ fc_rport_final_delete(void *data)
1770 1769
1771 /* Delete SCSI target and sdevs */ 1770 /* Delete SCSI target and sdevs */
1772 if (rport->scsi_target_id != -1) 1771 if (rport->scsi_target_id != -1)
1773 fc_starget_delete(data); 1772 fc_starget_delete(&rport->stgt_delete_work);
1774 else if (i->f->dev_loss_tmo_callbk) 1773 else if (i->f->dev_loss_tmo_callbk)
1775 i->f->dev_loss_tmo_callbk(rport); 1774 i->f->dev_loss_tmo_callbk(rport);
1776 else if (i->f->terminate_rport_io) 1775 else if (i->f->terminate_rport_io)
@@ -1829,11 +1828,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1829 rport->channel = channel; 1828 rport->channel = channel;
1830 rport->fast_io_fail_tmo = -1; 1829 rport->fast_io_fail_tmo = -1;
1831 1830
1832 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); 1831 INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
1833 INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport); 1832 INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
1834 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); 1833 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
1835 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); 1834 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
1836 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); 1835 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
1837 1836
1838 spin_lock_irqsave(shost->host_lock, flags); 1837 spin_lock_irqsave(shost->host_lock, flags);
1839 1838
@@ -1963,7 +1962,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1963 } 1962 }
1964 1963
1965 if (match) { 1964 if (match) {
1966 struct work_struct *work = 1965 struct delayed_work *work =
1967 &rport->dev_loss_work; 1966 &rport->dev_loss_work;
1968 1967
1969 memcpy(&rport->node_name, &ids->node_name, 1968 memcpy(&rport->node_name, &ids->node_name,
@@ -2267,12 +2266,13 @@ EXPORT_SYMBOL(fc_remote_port_rolechg);
2267 * was a SCSI target (thus was blocked), and failed 2266 * was a SCSI target (thus was blocked), and failed
2268 * to return in the alloted time. 2267 * to return in the alloted time.
2269 * 2268 *
2270 * @data: rport target that failed to reappear in the alloted time. 2269 * @work: rport target that failed to reappear in the alloted time.
2271 **/ 2270 **/
2272static void 2271static void
2273fc_timeout_deleted_rport(void *data) 2272fc_timeout_deleted_rport(struct work_struct *work)
2274{ 2273{
2275 struct fc_rport *rport = (struct fc_rport *)data; 2274 struct fc_rport *rport =
2275 container_of(work, struct fc_rport, dev_loss_work.work);
2276 struct Scsi_Host *shost = rport_to_shost(rport); 2276 struct Scsi_Host *shost = rport_to_shost(rport);
2277 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2277 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2278 unsigned long flags; 2278 unsigned long flags;
@@ -2366,15 +2366,16 @@ fc_timeout_deleted_rport(void *data)
2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a 2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
2367 * disconnected SCSI target. 2367 * disconnected SCSI target.
2368 * 2368 *
2369 * @data: rport to terminate io on. 2369 * @work: rport to terminate io on.
2370 * 2370 *
2371 * Notes: Only requests the failure of the io, not that all are flushed 2371 * Notes: Only requests the failure of the io, not that all are flushed
2372 * prior to returning. 2372 * prior to returning.
2373 **/ 2373 **/
2374static void 2374static void
2375fc_timeout_fail_rport_io(void *data) 2375fc_timeout_fail_rport_io(struct work_struct *work)
2376{ 2376{
2377 struct fc_rport *rport = (struct fc_rport *)data; 2377 struct fc_rport *rport =
2378 container_of(work, struct fc_rport, fail_io_work.work);
2378 struct Scsi_Host *shost = rport_to_shost(rport); 2379 struct Scsi_Host *shost = rport_to_shost(rport);
2379 struct fc_internal *i = to_fc_internal(shost->transportt); 2380 struct fc_internal *i = to_fc_internal(shost->transportt);
2380 2381
@@ -2387,12 +2388,13 @@ fc_timeout_fail_rport_io(void *data)
2387/** 2388/**
2388 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 2389 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
2389 * 2390 *
2390 * @data: remote port to be scanned. 2391 * @work: remote port to be scanned.
2391 **/ 2392 **/
2392static void 2393static void
2393fc_scsi_scan_rport(void *data) 2394fc_scsi_scan_rport(struct work_struct *work)
2394{ 2395{
2395 struct fc_rport *rport = (struct fc_rport *)data; 2396 struct fc_rport *rport =
2397 container_of(work, struct fc_rport, scan_work);
2396 struct Scsi_Host *shost = rport_to_shost(rport); 2398 struct Scsi_Host *shost = rport_to_shost(rport);
2397 unsigned long flags; 2399 unsigned long flags;
2398 2400
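The fc transport hunks also retire the delay == 0 special case in fc_queue_devloss_work(): queue_delayed_work() itself queues for immediate execution when the delay is zero, so the fallback to queue_work() is no longer needed. Illustrative shape only (my_queue and my_wq are invented names; the workqueue is assumed to be created elsewhere):

    #include <linux/workqueue.h>

    static int my_queue(struct workqueue_struct *my_wq,
                        struct delayed_work *dwork, unsigned long delay)
    {
            /* the old code fell back to queue_work() when delay was 0;
             * queue_delayed_work() now covers that case by queueing immediately */
            return queue_delayed_work(my_wq, dwork, delay);
    }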
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9b25124a989e..9c22f1342715 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -234,9 +234,11 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
234 return 0; 234 return 0;
235} 235}
236 236
237static void session_recovery_timedout(void *data) 237static void session_recovery_timedout(struct work_struct *work)
238{ 238{
239 struct iscsi_cls_session *session = data; 239 struct iscsi_cls_session *session =
240 container_of(work, struct iscsi_cls_session,
241 recovery_work.work);
240 242
241 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " 243 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
242 "out after %d secs\n", session->recovery_tmo); 244 "out after %d secs\n", session->recovery_tmo);
@@ -276,7 +278,7 @@ iscsi_alloc_session(struct Scsi_Host *shost,
276 278
277 session->transport = transport; 279 session->transport = transport;
278 session->recovery_tmo = 120; 280 session->recovery_tmo = 120;
279 INIT_WORK(&session->recovery_work, session_recovery_timedout, session); 281 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
280 INIT_LIST_HEAD(&session->host_list); 282 INIT_LIST_HEAD(&session->host_list);
281 INIT_LIST_HEAD(&session->sess_list); 283 INIT_LIST_HEAD(&session->sess_list);
282 284
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 9f070f0d0f2b..3fded4831460 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -964,9 +964,10 @@ struct work_queue_wrapper {
964}; 964};
965 965
966static void 966static void
967spi_dv_device_work_wrapper(void *data) 967spi_dv_device_work_wrapper(struct work_struct *work)
968{ 968{
969 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 969 struct work_queue_wrapper *wqw =
970 container_of(work, struct work_queue_wrapper, work);
970 struct scsi_device *sdev = wqw->sdev; 971 struct scsi_device *sdev = wqw->sdev;
971 972
972 kfree(wqw); 973 kfree(wqw);
@@ -1006,7 +1007,7 @@ spi_schedule_dv_device(struct scsi_device *sdev)
1006 return; 1007 return;
1007 } 1008 }
1008 1009
1009 INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); 1010 INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
1010 wqw->sdev = sdev; 1011 wqw->sdev = sdev;
1011 1012
1012 schedule_work(&wqw->work); 1013 schedule_work(&wqw->work);
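spi_schedule_dv_device() shows the one-shot wrapper variant: the pointer that used to ride along as the void * (here the scsi_device) is stored in a small heap-allocated wrapper next to the work_struct, and the handler recovers and frees the wrapper before doing its job. A rough sketch with invented names (struct my_req, my_worker, my_submit):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_req {
            struct work_struct work;
            int arg;                        /* payload formerly passed via the void * */
    };

    static void my_worker(struct work_struct *work)
    {
            struct my_req *req = container_of(work, struct my_req, work);
            int arg = req->arg;

            kfree(req);                     /* one-shot wrapper, free it first */
            /* ... use arg ... */
            (void)arg;
    }

    static int my_submit(int arg)
    {
            struct my_req *req = kmalloc(sizeof(*req), GFP_KERNEL);

            if (!req)
                    return -ENOMEM;
            INIT_WORK(&req->work, my_worker);
            req->arg = arg;
            schedule_work(&req->work);
            return 0;
    }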
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index a23862ef72b2..08c1c57c6128 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -265,9 +265,10 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
265 * Drivers can provide word-at-a-time i/o primitives, or provide 265 * Drivers can provide word-at-a-time i/o primitives, or provide
266 * transfer-at-a-time ones to leverage dma or fifo hardware. 266 * transfer-at-a-time ones to leverage dma or fifo hardware.
267 */ 267 */
268static void bitbang_work(void *_bitbang) 268static void bitbang_work(struct work_struct *work)
269{ 269{
270 struct spi_bitbang *bitbang = _bitbang; 270 struct spi_bitbang *bitbang =
271 container_of(work, struct spi_bitbang, work);
271 unsigned long flags; 272 unsigned long flags;
272 273
273 spin_lock_irqsave(&bitbang->lock, flags); 274 spin_lock_irqsave(&bitbang->lock, flags);
@@ -456,7 +457,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
456 if (!bitbang->master || !bitbang->chipselect) 457 if (!bitbang->master || !bitbang->chipselect)
457 return -EINVAL; 458 return -EINVAL;
458 459
459 INIT_WORK(&bitbang->work, bitbang_work, bitbang); 460 INIT_WORK(&bitbang->work, bitbang_work);
460 spin_lock_init(&bitbang->lock); 461 spin_lock_init(&bitbang->lock);
461 INIT_LIST_HEAD(&bitbang->queue); 462 INIT_LIST_HEAD(&bitbang->queue);
462 463
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index e6565633ba0f..3dfa3e40e148 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -158,7 +158,7 @@ struct cxacru_data {
158 const struct cxacru_modem_type *modem_type; 158 const struct cxacru_modem_type *modem_type;
159 159
160 int line_status; 160 int line_status;
161 struct work_struct poll_work; 161 struct delayed_work poll_work;
162 162
163 /* contol handles */ 163 /* contol handles */
164 struct mutex cm_serialize; 164 struct mutex cm_serialize;
@@ -347,7 +347,7 @@ static int cxacru_card_status(struct cxacru_data *instance)
347 return 0; 347 return 0;
348} 348}
349 349
350static void cxacru_poll_status(struct cxacru_data *instance); 350static void cxacru_poll_status(struct work_struct *work);
351 351
352static int cxacru_atm_start(struct usbatm_data *usbatm_instance, 352static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
353 struct atm_dev *atm_dev) 353 struct atm_dev *atm_dev)
@@ -376,12 +376,14 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
376 } 376 }
377 377
378 /* Start status polling */ 378 /* Start status polling */
379 cxacru_poll_status(instance); 379 cxacru_poll_status(&instance->poll_work.work);
380 return 0; 380 return 0;
381} 381}
382 382
383static void cxacru_poll_status(struct cxacru_data *instance) 383static void cxacru_poll_status(struct work_struct *work)
384{ 384{
385 struct cxacru_data *instance =
386 container_of(work, struct cxacru_data, poll_work.work);
385 u32 buf[CXINF_MAX] = {}; 387 u32 buf[CXINF_MAX] = {};
386 struct usbatm_data *usbatm = instance->usbatm; 388 struct usbatm_data *usbatm = instance->usbatm;
387 struct atm_dev *atm_dev = usbatm->atm_dev; 389 struct atm_dev *atm_dev = usbatm->atm_dev;
@@ -720,7 +722,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
720 722
721 mutex_init(&instance->cm_serialize); 723 mutex_init(&instance->cm_serialize);
722 724
723 INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance); 725 INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);
724 726
725 usbatm_instance->driver_data = instance; 727 usbatm_instance->driver_data = instance;
726 728
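cxacru_atm_start() also needs to kick off status polling with an immediate, synchronous call; since the handler now expects a work_struct pointer, it is simply handed the embedded member, and container_of() inside still resolves to the owning instance. In generic form (names invented):

    #include <linux/workqueue.h>

    struct my_dev {
            struct delayed_work poll_work;
    };

    static void my_poll_status(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, poll_work.work);

            /* ... read status, possibly re-arm poll_work ... */
            (void)dev;
    }

    static void my_start(struct my_dev *dev)
    {
            /* direct, synchronous call: pass the embedded work_struct */
            my_poll_status(&dev->poll_work.work);
    }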
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index a823486495c3..8ed6c75adf0f 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -142,7 +142,7 @@ struct speedtch_instance_data {
142 142
143 struct speedtch_params params; /* set in probe, constant afterwards */ 143 struct speedtch_params params; /* set in probe, constant afterwards */
144 144
145 struct work_struct status_checker; 145 struct delayed_work status_checker;
146 146
147 unsigned char last_status; 147 unsigned char last_status;
148 148
@@ -498,8 +498,11 @@ static int speedtch_start_synchro(struct speedtch_instance_data *instance)
498 return ret; 498 return ret;
499} 499}
500 500
501static void speedtch_check_status(struct speedtch_instance_data *instance) 501static void speedtch_check_status(struct work_struct *work)
502{ 502{
503 struct speedtch_instance_data *instance =
504 container_of(work, struct speedtch_instance_data,
505 status_checker.work);
503 struct usbatm_data *usbatm = instance->usbatm; 506 struct usbatm_data *usbatm = instance->usbatm;
504 struct atm_dev *atm_dev = usbatm->atm_dev; 507 struct atm_dev *atm_dev = usbatm->atm_dev;
505 unsigned char *buf = instance->scratch_buffer; 508 unsigned char *buf = instance->scratch_buffer;
@@ -576,7 +579,7 @@ static void speedtch_status_poll(unsigned long data)
576{ 579{
577 struct speedtch_instance_data *instance = (void *)data; 580 struct speedtch_instance_data *instance = (void *)data;
578 581
579 schedule_work(&instance->status_checker); 582 schedule_delayed_work(&instance->status_checker, 0);
580 583
581 /* The following check is racy, but the race is harmless */ 584 /* The following check is racy, but the race is harmless */
582 if (instance->poll_delay < MAX_POLL_DELAY) 585 if (instance->poll_delay < MAX_POLL_DELAY)
@@ -596,7 +599,7 @@ static void speedtch_resubmit_int(unsigned long data)
596 if (int_urb) { 599 if (int_urb) {
597 ret = usb_submit_urb(int_urb, GFP_ATOMIC); 600 ret = usb_submit_urb(int_urb, GFP_ATOMIC);
598 if (!ret) 601 if (!ret)
599 schedule_work(&instance->status_checker); 602 schedule_delayed_work(&instance->status_checker, 0);
600 else { 603 else {
601 atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); 604 atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
602 mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); 605 mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@@ -640,7 +643,7 @@ static void speedtch_handle_int(struct urb *int_urb)
640 643
641 if ((int_urb = instance->int_urb)) { 644 if ((int_urb = instance->int_urb)) {
642 ret = usb_submit_urb(int_urb, GFP_ATOMIC); 645 ret = usb_submit_urb(int_urb, GFP_ATOMIC);
643 schedule_work(&instance->status_checker); 646 schedule_delayed_work(&instance->status_checker, 0);
644 if (ret < 0) { 647 if (ret < 0) {
645 atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); 648 atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
646 goto fail; 649 goto fail;
@@ -855,7 +858,7 @@ static int speedtch_bind(struct usbatm_data *usbatm,
855 858
856 usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0); 859 usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
857 860
858 INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance); 861 INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
859 862
860 instance->status_checker.timer.function = speedtch_status_poll; 863 instance->status_checker.timer.function = speedtch_status_poll;
861 instance->status_checker.timer.data = (unsigned long)instance; 864 instance->status_checker.timer.data = (unsigned long)instance;
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index c137c041f7a4..f2d196fa1e8b 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -655,9 +655,9 @@ static int request_dsp(struct uea_softc *sc)
655/* 655/*
656 * The uea_load_page() function must be called within a process context 656 * The uea_load_page() function must be called within a process context
657 */ 657 */
658static void uea_load_page(void *xsc) 658static void uea_load_page(struct work_struct *work)
659{ 659{
660 struct uea_softc *sc = xsc; 660 struct uea_softc *sc = container_of(work, struct uea_softc, task);
661 u16 pageno = sc->pageno; 661 u16 pageno = sc->pageno;
662 u16 ovl = sc->ovl; 662 u16 ovl = sc->ovl;
663 struct block_info bi; 663 struct block_info bi;
@@ -1348,7 +1348,7 @@ static int uea_boot(struct uea_softc *sc)
1348 1348
1349 uea_enters(INS_TO_USBDEV(sc)); 1349 uea_enters(INS_TO_USBDEV(sc));
1350 1350
1351 INIT_WORK(&sc->task, uea_load_page, sc); 1351 INIT_WORK(&sc->task, uea_load_page);
1352 init_waitqueue_head(&sc->sync_q); 1352 init_waitqueue_head(&sc->sync_q);
1353 init_waitqueue_head(&sc->cmv_ack_wait); 1353 init_waitqueue_head(&sc->cmv_ack_wait);
1354 1354
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ec3438dc8ee5..7f1fa956dcdb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -421,9 +421,9 @@ static void acm_write_bulk(struct urb *urb)
421 schedule_work(&acm->work); 421 schedule_work(&acm->work);
422} 422}
423 423
424static void acm_softint(void *private) 424static void acm_softint(struct work_struct *work)
425{ 425{
426 struct acm *acm = private; 426 struct acm *acm = container_of(work, struct acm, work);
427 dbg("Entering acm_softint."); 427 dbg("Entering acm_softint.");
428 428
429 if (!ACM_READY(acm)) 429 if (!ACM_READY(acm))
@@ -927,7 +927,7 @@ skip_normal_probe:
927 acm->rx_buflimit = num_rx_buf; 927 acm->rx_buflimit = num_rx_buf;
928 acm->urb_task.func = acm_rx_tasklet; 928 acm->urb_task.func = acm_rx_tasklet;
929 acm->urb_task.data = (unsigned long) acm; 929 acm->urb_task.data = (unsigned long) acm;
930 INIT_WORK(&acm->work, acm_softint, acm); 930 INIT_WORK(&acm->work, acm_softint);
931 spin_lock_init(&acm->throttle_lock); 931 spin_lock_init(&acm->throttle_lock);
932 spin_lock_init(&acm->write_lock); 932 spin_lock_init(&acm->write_lock);
933 spin_lock_init(&acm->read_lock); 933 spin_lock_init(&acm->read_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0ce393eb3c4b..39186db1015f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -218,9 +218,10 @@ static void set_port_led(
218 218
219#define LED_CYCLE_PERIOD ((2*HZ)/3) 219#define LED_CYCLE_PERIOD ((2*HZ)/3)
220 220
221static void led_work (void *__hub) 221static void led_work (struct work_struct *work)
222{ 222{
223 struct usb_hub *hub = __hub; 223 struct usb_hub *hub =
224 container_of(work, struct usb_hub, leds.work);
224 struct usb_device *hdev = hub->hdev; 225 struct usb_device *hdev = hub->hdev;
225 unsigned i; 226 unsigned i;
226 unsigned changed = 0; 227 unsigned changed = 0;
@@ -405,9 +406,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
405 * talking to TTs must queue control transfers (not just bulk and iso), so 406 * talking to TTs must queue control transfers (not just bulk and iso), so
406 * both can talk to the same hub concurrently. 407 * both can talk to the same hub concurrently.
407 */ 408 */
408static void hub_tt_kevent (void *arg) 409static void hub_tt_kevent (struct work_struct *work)
409{ 410{
410 struct usb_hub *hub = arg; 411 struct usb_hub *hub =
412 container_of(work, struct usb_hub, tt.kevent);
411 unsigned long flags; 413 unsigned long flags;
412 414
413 spin_lock_irqsave (&hub->tt.lock, flags); 415 spin_lock_irqsave (&hub->tt.lock, flags);
@@ -694,7 +696,7 @@ static int hub_configure(struct usb_hub *hub,
694 696
695 spin_lock_init (&hub->tt.lock); 697 spin_lock_init (&hub->tt.lock);
696 INIT_LIST_HEAD (&hub->tt.clear_list); 698 INIT_LIST_HEAD (&hub->tt.clear_list);
697 INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub); 699 INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
698 switch (hdev->descriptor.bDeviceProtocol) { 700 switch (hdev->descriptor.bDeviceProtocol) {
699 case 0: 701 case 0:
700 break; 702 break;
@@ -938,7 +940,7 @@ descriptor_error:
938 INIT_LIST_HEAD(&hub->event_list); 940 INIT_LIST_HEAD(&hub->event_list);
939 hub->intfdev = &intf->dev; 941 hub->intfdev = &intf->dev;
940 hub->hdev = hdev; 942 hub->hdev = hdev;
941 INIT_WORK(&hub->leds, led_work, hub); 943 INIT_DELAYED_WORK(&hub->leds, led_work);
942 944
943 usb_set_intfdata (intf, hub); 945 usb_set_intfdata (intf, hub);
944 intf->needs_remote_wakeup = 1; 946 intf->needs_remote_wakeup = 1;
@@ -2381,7 +2383,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
2381 /* hub LEDs are probably harder to miss than syslog */ 2383 /* hub LEDs are probably harder to miss than syslog */
2382 if (hub->has_indicators) { 2384 if (hub->has_indicators) {
2383 hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; 2385 hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
2384 schedule_work (&hub->leds); 2386 schedule_delayed_work (&hub->leds, 0);
2385 } 2387 }
2386 } 2388 }
2387 kfree(qual); 2389 kfree(qual);
@@ -2555,7 +2557,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2555 if (hub->has_indicators) { 2557 if (hub->has_indicators) {
2556 hub->indicator[port1-1] = 2558 hub->indicator[port1-1] =
2557 INDICATOR_AMBER_BLINK; 2559 INDICATOR_AMBER_BLINK;
2558 schedule_work (&hub->leds); 2560 schedule_delayed_work (&hub->leds, 0);
2559 } 2561 }
2560 status = -ENOTCONN; /* Don't retry */ 2562 status = -ENOTCONN; /* Don't retry */
2561 goto loop_disable; 2563 goto loop_disable;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 29b0fa9ff9d0..7390b67c609d 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1501,9 +1501,10 @@ struct set_config_request {
1501}; 1501};
1502 1502
1503/* Worker routine for usb_driver_set_configuration() */ 1503/* Worker routine for usb_driver_set_configuration() */
1504static void driver_set_config_work(void *_req) 1504static void driver_set_config_work(struct work_struct *work)
1505{ 1505{
1506 struct set_config_request *req = _req; 1506 struct set_config_request *req =
1507 container_of(work, struct set_config_request, work);
1507 1508
1508 usb_lock_device(req->udev); 1509 usb_lock_device(req->udev);
1509 usb_set_configuration(req->udev, req->config); 1510 usb_set_configuration(req->udev, req->config);
@@ -1541,7 +1542,7 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
1541 return -ENOMEM; 1542 return -ENOMEM;
1542 req->udev = udev; 1543 req->udev = udev;
1543 req->config = config; 1544 req->config = config;
1544 INIT_WORK(&req->work, driver_set_config_work, req); 1545 INIT_WORK(&req->work, driver_set_config_work);
1545 1546
1546 usb_get_dev(udev); 1547 usb_get_dev(udev);
1547 if (!schedule_work(&req->work)) { 1548 if (!schedule_work(&req->work)) {
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 81cb52564e68..02426d0b9a34 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -203,9 +203,10 @@ static void ksuspend_usb_cleanup(void)
203#ifdef CONFIG_USB_SUSPEND 203#ifdef CONFIG_USB_SUSPEND
204 204
205/* usb_autosuspend_work - callback routine to autosuspend a USB device */ 205/* usb_autosuspend_work - callback routine to autosuspend a USB device */
206static void usb_autosuspend_work(void *_udev) 206static void usb_autosuspend_work(struct work_struct *work)
207{ 207{
208 struct usb_device *udev = _udev; 208 struct usb_device *udev =
209 container_of(work, struct usb_device, autosuspend.work);
209 210
210 usb_pm_lock(udev); 211 usb_pm_lock(udev);
211 udev->auto_pm = 1; 212 udev->auto_pm = 1;
@@ -215,7 +216,7 @@ static void usb_autosuspend_work(void *_udev)
215 216
216#else 217#else
217 218
218static void usb_autosuspend_work(void *_udev) 219static void usb_autosuspend_work(struct work_struct *work)
219{} 220{}
220 221
221#endif /* CONFIG_USB_SUSPEND */ 222#endif /* CONFIG_USB_SUSPEND */
@@ -304,7 +305,7 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
304 305
305#ifdef CONFIG_PM 306#ifdef CONFIG_PM
306 mutex_init(&dev->pm_mutex); 307 mutex_init(&dev->pm_mutex);
307 INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev); 308 INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
308#endif 309#endif
309 return dev; 310 return dev;
310} 311}
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 3bd1dfe565c1..d15bf22b9a03 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -1833,9 +1833,9 @@ static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
1833 spin_unlock_irqrestore(&dev->req_lock, flags); 1833 spin_unlock_irqrestore(&dev->req_lock, flags);
1834} 1834}
1835 1835
1836static void eth_work (void *_dev) 1836static void eth_work (struct work_struct *work)
1837{ 1837{
1838 struct eth_dev *dev = _dev; 1838 struct eth_dev *dev = container_of(work, struct eth_dev, work);
1839 1839
1840 if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) { 1840 if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
1841 if (netif_running (dev->net)) 1841 if (netif_running (dev->net))
@@ -2398,7 +2398,7 @@ autoconf_fail:
2398 dev = netdev_priv(net); 2398 dev = netdev_priv(net);
2399 spin_lock_init (&dev->lock); 2399 spin_lock_init (&dev->lock);
2400 spin_lock_init (&dev->req_lock); 2400 spin_lock_init (&dev->req_lock);
2401 INIT_WORK (&dev->work, eth_work, dev); 2401 INIT_WORK (&dev->work, eth_work);
2402 INIT_LIST_HEAD (&dev->tx_reqs); 2402 INIT_LIST_HEAD (&dev->tx_reqs);
2403 INIT_LIST_HEAD (&dev->rx_reqs); 2403 INIT_LIST_HEAD (&dev->rx_reqs);
2404 2404
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ef54e310bfc4..a9d7119e3176 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -163,7 +163,7 @@ struct u132_endp {
163 u16 queue_next; 163 u16 queue_next;
164 struct urb *urb_list[ENDP_QUEUE_SIZE]; 164 struct urb *urb_list[ENDP_QUEUE_SIZE];
165 struct list_head urb_more; 165 struct list_head urb_more;
166 struct work_struct scheduler; 166 struct delayed_work scheduler;
167}; 167};
168struct u132_ring { 168struct u132_ring {
169 unsigned in_use:1; 169 unsigned in_use:1;
@@ -171,7 +171,7 @@ struct u132_ring {
171 u8 number; 171 u8 number;
172 struct u132 *u132; 172 struct u132 *u132;
173 struct u132_endp *curr_endp; 173 struct u132_endp *curr_endp;
174 struct work_struct scheduler; 174 struct delayed_work scheduler;
175}; 175};
176#define OHCI_QUIRK_AMD756 0x01 176#define OHCI_QUIRK_AMD756 0x01
177#define OHCI_QUIRK_SUPERIO 0x02 177#define OHCI_QUIRK_SUPERIO 0x02
@@ -198,7 +198,7 @@ struct u132 {
198 u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; 198 u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
199 int flags; 199 int flags;
200 unsigned long next_statechange; 200 unsigned long next_statechange;
201 struct work_struct monitor; 201 struct delayed_work monitor;
202 int num_endpoints; 202 int num_endpoints;
203 struct u132_addr addr[MAX_U132_ADDRS]; 203 struct u132_addr addr[MAX_U132_ADDRS];
204 struct u132_udev udev[MAX_U132_UDEVS]; 204 struct u132_udev udev[MAX_U132_UDEVS];
@@ -310,7 +310,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
310 if (delta > 0) { 310 if (delta > 0) {
311 if (queue_delayed_work(workqueue, &ring->scheduler, delta)) 311 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
312 return; 312 return;
313 } else if (queue_work(workqueue, &ring->scheduler)) 313 } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
314 return; 314 return;
315 kref_put(&u132->kref, u132_hcd_delete); 315 kref_put(&u132->kref, u132_hcd_delete);
316 return; 316 return;
@@ -389,12 +389,8 @@ static inline void u132_endp_init_kref(struct u132 *u132,
389static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, 389static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
390 unsigned int delta) 390 unsigned int delta)
391{ 391{
392 if (delta > 0) { 392 if (queue_delayed_work(workqueue, &endp->scheduler, delta))
393 if (queue_delayed_work(workqueue, &endp->scheduler, delta)) 393 kref_get(&endp->kref);
394 kref_get(&endp->kref);
395 } else if (queue_work(workqueue, &endp->scheduler))
396 kref_get(&endp->kref);
397 return;
398} 394}
399 395
400static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) 396static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -410,24 +406,14 @@ static inline void u132_monitor_put_kref(struct u132 *u132)
410 406
411static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) 407static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
412{ 408{
413 if (delta > 0) { 409 if (queue_delayed_work(workqueue, &u132->monitor, delta))
414 if (queue_delayed_work(workqueue, &u132->monitor, delta)) { 410 kref_get(&u132->kref);
415 kref_get(&u132->kref);
416 }
417 } else if (queue_work(workqueue, &u132->monitor))
418 kref_get(&u132->kref);
419 return;
420} 411}
421 412
422static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) 413static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
423{ 414{
424 if (delta > 0) { 415 if (!queue_delayed_work(workqueue, &u132->monitor, delta))
425 if (queue_delayed_work(workqueue, &u132->monitor, delta)) 416 kref_put(&u132->kref, u132_hcd_delete);
426 return;
427 } else if (queue_work(workqueue, &u132->monitor))
428 return;
429 kref_put(&u132->kref, u132_hcd_delete);
430 return;
431} 417}
432 418
433static void u132_monitor_cancel_work(struct u132 *u132) 419static void u132_monitor_cancel_work(struct u132 *u132)
@@ -489,9 +475,9 @@ static int read_roothub_info(struct u132 *u132)
489 return 0; 475 return 0;
490} 476}
491 477
492static void u132_hcd_monitor_work(void *data) 478static void u132_hcd_monitor_work(struct work_struct *work)
493{ 479{
494 struct u132 *u132 = data; 480 struct u132 *u132 = container_of(work, struct u132, monitor.work);
495 if (u132->going > 1) { 481 if (u132->going > 1) {
496 dev_err(&u132->platform_dev->dev, "device has been removed %d\n" 482 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
497 , u132->going); 483 , u132->going);
@@ -1315,15 +1301,14 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
1315 } 1301 }
1316} 1302}
1317 1303
1318static void u132_hcd_ring_work_scheduler(void *data);
1319static void u132_hcd_endp_work_scheduler(void *data);
1320/* 1304/*
1321* this work function is only executed from the work queue 1305* this work function is only executed from the work queue
1322* 1306*
1323*/ 1307*/
1324static void u132_hcd_ring_work_scheduler(void *data) 1308static void u132_hcd_ring_work_scheduler(struct work_struct *work)
1325{ 1309{
1326 struct u132_ring *ring = data; 1310 struct u132_ring *ring =
1311 container_of(work, struct u132_ring, scheduler.work);
1327 struct u132 *u132 = ring->u132; 1312 struct u132 *u132 = ring->u132;
1328 down(&u132->scheduler_lock); 1313 down(&u132->scheduler_lock);
1329 if (ring->in_use) { 1314 if (ring->in_use) {
@@ -1382,10 +1367,11 @@ static void u132_hcd_ring_work_scheduler(void *data)
1382 } 1367 }
1383} 1368}
1384 1369
1385static void u132_hcd_endp_work_scheduler(void *data) 1370static void u132_hcd_endp_work_scheduler(struct work_struct *work)
1386{ 1371{
1387 struct u132_ring *ring; 1372 struct u132_ring *ring;
1388 struct u132_endp *endp = data; 1373 struct u132_endp *endp =
1374 container_of(work, struct u132_endp, scheduler.work);
1389 struct u132 *u132 = endp->u132; 1375 struct u132 *u132 = endp->u132;
1390 down(&u132->scheduler_lock); 1376 down(&u132->scheduler_lock);
1391 ring = endp->ring; 1377 ring = endp->ring;
@@ -1943,7 +1929,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
1943 if (!endp) { 1929 if (!endp) {
1944 return -ENOMEM; 1930 return -ENOMEM;
1945 } 1931 }
1946 INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 1932 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
1947 spin_lock_init(&endp->queue_lock.slock); 1933 spin_lock_init(&endp->queue_lock.slock);
1948 INIT_LIST_HEAD(&endp->urb_more); 1934 INIT_LIST_HEAD(&endp->urb_more);
1949 ring = endp->ring = &u132->ring[0]; 1935 ring = endp->ring = &u132->ring[0];
@@ -2032,7 +2018,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
2032 if (!endp) { 2018 if (!endp) {
2033 return -ENOMEM; 2019 return -ENOMEM;
2034 } 2020 }
2035 INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 2021 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
2036 spin_lock_init(&endp->queue_lock.slock); 2022 spin_lock_init(&endp->queue_lock.slock);
2037 INIT_LIST_HEAD(&endp->urb_more); 2023 INIT_LIST_HEAD(&endp->urb_more);
2038 endp->dequeueing = 0; 2024 endp->dequeueing = 0;
@@ -2117,7 +2103,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
2117 if (!endp) { 2103 if (!endp) {
2118 return -ENOMEM; 2104 return -ENOMEM;
2119 } 2105 }
2120 INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 2106 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
2121 spin_lock_init(&endp->queue_lock.slock); 2107 spin_lock_init(&endp->queue_lock.slock);
2122 INIT_LIST_HEAD(&endp->urb_more); 2108 INIT_LIST_HEAD(&endp->urb_more);
2123 ring = endp->ring = &u132->ring[0]; 2109 ring = endp->ring = &u132->ring[0];
@@ -3096,10 +3082,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
3096 ring->number = rings + 1; 3082 ring->number = rings + 1;
3097 ring->length = 0; 3083 ring->length = 0;
3098 ring->curr_endp = NULL; 3084 ring->curr_endp = NULL;
3099 INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler, 3085 INIT_DELAYED_WORK(&ring->scheduler,
3100 (void *)ring); 3086 u132_hcd_ring_work_scheduler);
3101 } down(&u132->sw_lock); 3087 } down(&u132->sw_lock);
3102 INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132); 3088 INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
3103 while (ports-- > 0) { 3089 while (ports-- > 0) {
3104 struct u132_port *port = &u132->port[ports]; 3090 struct u132_port *port = &u132->port[ports];
3105 port->u132 = u132; 3091 port->u132 = u132;
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index a49644b7c58e..4295bab4f1e2 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -969,9 +969,10 @@ static void hid_retry_timeout(unsigned long _hid)
969} 969}
970 970
971/* Workqueue routine to reset the device or clear a halt */ 971/* Workqueue routine to reset the device or clear a halt */
972static void hid_reset(void *_hid) 972static void hid_reset(struct work_struct *work)
973{ 973{
974 struct hid_device *hid = (struct hid_device *) _hid; 974 struct hid_device *hid =
975 container_of(work, struct hid_device, reset_work);
975 int rc_lock, rc = 0; 976 int rc_lock, rc = 0;
976 977
977 if (test_bit(HID_CLEAR_HALT, &hid->iofl)) { 978 if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
@@ -2043,7 +2044,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
2043 2044
2044 init_waitqueue_head(&hid->wait); 2045 init_waitqueue_head(&hid->wait);
2045 2046
2046 INIT_WORK(&hid->reset_work, hid_reset, hid); 2047 INIT_WORK(&hid->reset_work, hid_reset);
2047 setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid); 2048 setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
2048 2049
2049 spin_lock_init(&hid->inlock); 2050 spin_lock_init(&hid->inlock);
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index cb0ba3107d7f..18b1925032a8 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -156,9 +156,9 @@ struct usb_ftdi {
156 struct usb_device *udev; 156 struct usb_device *udev;
157 struct usb_interface *interface; 157 struct usb_interface *interface;
158 struct usb_class_driver *class; 158 struct usb_class_driver *class;
159 struct work_struct status_work; 159 struct delayed_work status_work;
160 struct work_struct command_work; 160 struct delayed_work command_work;
161 struct work_struct respond_work; 161 struct delayed_work respond_work;
162 struct u132_platform_data platform_data; 162 struct u132_platform_data platform_data;
163 struct resource resources[0]; 163 struct resource resources[0];
164 struct platform_device platform_dev; 164 struct platform_device platform_dev;
@@ -210,23 +210,14 @@ static void ftdi_elan_init_kref(struct usb_ftdi *ftdi)
210 210
211static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) 211static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
212{ 212{
213 if (delta > 0) { 213 if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
214 if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) 214 kref_put(&ftdi->kref, ftdi_elan_delete);
215 return;
216 } else if (queue_work(status_queue, &ftdi->status_work))
217 return;
218 kref_put(&ftdi->kref, ftdi_elan_delete);
219 return;
220} 215}
221 216
222static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 217static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
223{ 218{
224 if (delta > 0) { 219 if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
225 if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) 220 kref_get(&ftdi->kref);
226 kref_get(&ftdi->kref);
227 } else if (queue_work(status_queue, &ftdi->status_work))
228 kref_get(&ftdi->kref);
229 return;
230} 221}
231 222
232static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) 223static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
@@ -237,25 +228,14 @@ static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
237 228
238static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) 229static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
239{ 230{
240 if (delta > 0) { 231 if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
241 if (queue_delayed_work(command_queue, &ftdi->command_work, 232 kref_put(&ftdi->kref, ftdi_elan_delete);
242 delta))
243 return;
244 } else if (queue_work(command_queue, &ftdi->command_work))
245 return;
246 kref_put(&ftdi->kref, ftdi_elan_delete);
247 return;
248} 233}
249 234
250static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 235static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
251{ 236{
252 if (delta > 0) { 237 if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
253 if (queue_delayed_work(command_queue, &ftdi->command_work, 238 kref_get(&ftdi->kref);
254 delta))
255 kref_get(&ftdi->kref);
256 } else if (queue_work(command_queue, &ftdi->command_work))
257 kref_get(&ftdi->kref);
258 return;
259} 239}
260 240
261static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) 241static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
@@ -267,25 +247,14 @@ static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
267static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, 247static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
268 unsigned int delta) 248 unsigned int delta)
269{ 249{
270 if (delta > 0) { 250 if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
271 if (queue_delayed_work(respond_queue, &ftdi->respond_work, 251 kref_put(&ftdi->kref, ftdi_elan_delete);
272 delta))
273 return;
274 } else if (queue_work(respond_queue, &ftdi->respond_work))
275 return;
276 kref_put(&ftdi->kref, ftdi_elan_delete);
277 return;
278} 252}
279 253
280static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 254static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
281{ 255{
282 if (delta > 0) { 256 if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
283 if (queue_delayed_work(respond_queue, &ftdi->respond_work, 257 kref_get(&ftdi->kref);
284 delta))
285 kref_get(&ftdi->kref);
286 } else if (queue_work(respond_queue, &ftdi->respond_work))
287 kref_get(&ftdi->kref);
288 return;
289} 258}
290 259
291static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) 260static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
@@ -475,9 +444,11 @@ static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi)
475 return; 444 return;
476} 445}
477 446
478static void ftdi_elan_command_work(void *data) 447static void ftdi_elan_command_work(struct work_struct *work)
479{ 448{
480 struct usb_ftdi *ftdi = data; 449 struct usb_ftdi *ftdi =
450 container_of(work, struct usb_ftdi, command_work.work);
451
481 if (ftdi->disconnected > 0) { 452 if (ftdi->disconnected > 0) {
482 ftdi_elan_put_kref(ftdi); 453 ftdi_elan_put_kref(ftdi);
483 return; 454 return;
@@ -500,9 +471,10 @@ static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi)
500 return; 471 return;
501} 472}
502 473
503static void ftdi_elan_respond_work(void *data) 474static void ftdi_elan_respond_work(struct work_struct *work)
504{ 475{
505 struct usb_ftdi *ftdi = data; 476 struct usb_ftdi *ftdi =
477 container_of(work, struct usb_ftdi, respond_work.work);
506 if (ftdi->disconnected > 0) { 478 if (ftdi->disconnected > 0) {
507 ftdi_elan_put_kref(ftdi); 479 ftdi_elan_put_kref(ftdi);
508 return; 480 return;
@@ -534,9 +506,10 @@ static void ftdi_elan_respond_work(void *data)
534* after the FTDI has been synchronized 506* after the FTDI has been synchronized
535* 507*
536*/ 508*/
537static void ftdi_elan_status_work(void *data) 509static void ftdi_elan_status_work(struct work_struct *work)
538{ 510{
539 struct usb_ftdi *ftdi = data; 511 struct usb_ftdi *ftdi =
512 container_of(work, struct usb_ftdi, status_work.work);
540 int work_delay_in_msec = 0; 513 int work_delay_in_msec = 0;
541 if (ftdi->disconnected > 0) { 514 if (ftdi->disconnected > 0) {
542 ftdi_elan_put_kref(ftdi); 515 ftdi_elan_put_kref(ftdi);
@@ -2677,12 +2650,9 @@ static int ftdi_elan_probe(struct usb_interface *interface,
2677 ftdi->class = NULL; 2650 ftdi->class = NULL;
2678 dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" 2651 dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a"
2679 "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber); 2652 "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber);
2680 INIT_WORK(&ftdi->status_work, ftdi_elan_status_work, 2653 INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
2681 (void *)ftdi); 2654 INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
2682 INIT_WORK(&ftdi->command_work, ftdi_elan_command_work, 2655 INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
2683 (void *)ftdi);
2684 INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
2685 (void *)ftdi);
2686 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); 2656 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
2687 return 0; 2657 return 0;
2688 } else { 2658 } else {
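
The ftdi-elan hunks above are the core of the conversion: the handler now receives the struct work_struct pointer itself, the driver data is recovered with container_of(), and because a delayed_work may be queued with a zero delay, the old "if (delta > 0)" split between queue_work() and queue_delayed_work() collapses into one call whose boolean result still drives the kref get/put accounting. A minimal sketch of that pattern, not part of the patch, with purely illustrative names (my_dev, my_status_work, my_wq):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Illustrative driver state; only the delayed_work member matters here. */
struct my_dev {
	struct delayed_work status_work;	/* was: struct work_struct + void *data */
	int disconnected;
};

static struct workqueue_struct *my_wq;	/* assumed created elsewhere, e.g. with
					 * create_singlethread_workqueue() */

/* New-style handler: takes the work pointer instead of an opaque void *. */
static void my_status_work(struct work_struct *work)
{
	/* For a delayed_work the embedded work_struct is the ".work" member. */
	struct my_dev *dev = container_of(work, struct my_dev, status_work.work);

	if (dev->disconnected)
		return;
	/* ... poll the hardware, then requeue; a zero delta is now legal ... */
	queue_delayed_work(my_wq, &dev->status_work, msecs_to_jiffies(250));
}

static void my_dev_start(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->status_work, my_status_work);	/* no data argument */
	queue_delayed_work(my_wq, &dev->status_work, 0);	/* run immediately */
}
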
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c
index 9110793f81d3..9659c79e187e 100644
--- a/drivers/usb/misc/phidgetkit.c
+++ b/drivers/usb/misc/phidgetkit.c
@@ -81,8 +81,8 @@ struct interfacekit {
81 unsigned char *data; 81 unsigned char *data;
82 dma_addr_t data_dma; 82 dma_addr_t data_dma;
83 83
84 struct work_struct do_notify; 84 struct delayed_work do_notify;
85 struct work_struct do_resubmit; 85 struct delayed_work do_resubmit;
86 unsigned long input_events; 86 unsigned long input_events;
87 unsigned long sensor_events; 87 unsigned long sensor_events;
88}; 88};
@@ -374,7 +374,7 @@ static void interfacekit_irq(struct urb *urb)
374 } 374 }
375 375
376 if (kit->input_events || kit->sensor_events) 376 if (kit->input_events || kit->sensor_events)
377 schedule_work(&kit->do_notify); 377 schedule_delayed_work(&kit->do_notify, 0);
378 378
379resubmit: 379resubmit:
380 status = usb_submit_urb(urb, SLAB_ATOMIC); 380 status = usb_submit_urb(urb, SLAB_ATOMIC);
@@ -384,9 +384,10 @@ resubmit:
384 kit->udev->devpath, status); 384 kit->udev->devpath, status);
385} 385}
386 386
387static void do_notify(void *data) 387static void do_notify(struct work_struct *work)
388{ 388{
389 struct interfacekit *kit = data; 389 struct interfacekit *kit =
390 container_of(work, struct interfacekit, do_notify.work);
390 int i; 391 int i;
391 char sysfs_file[8]; 392 char sysfs_file[8];
392 393
@@ -405,9 +406,11 @@ static void do_notify(void *data)
405 } 406 }
406} 407}
407 408
408static void do_resubmit(void *data) 409static void do_resubmit(struct work_struct *work)
409{ 410{
410 set_outputs(data); 411 struct interfacekit *kit =
412 container_of(work, struct interfacekit, do_resubmit.work);
413 set_outputs(kit);
411} 414}
412 415
413#define show_set_output(value) \ 416#define show_set_output(value) \
@@ -575,8 +578,8 @@ static int interfacekit_probe(struct usb_interface *intf, const struct usb_devic
575 578
576 kit->udev = usb_get_dev(dev); 579 kit->udev = usb_get_dev(dev);
577 kit->intf = intf; 580 kit->intf = intf;
578 INIT_WORK(&kit->do_notify, do_notify, kit); 581 INIT_DELAYED_WORK(&kit->do_notify, do_notify);
579 INIT_WORK(&kit->do_resubmit, do_resubmit, kit); 582 INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
580 usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data, 583 usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
581 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, 584 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
582 interfacekit_irq, kit, endpoint->bInterval); 585 interfacekit_irq, kit, endpoint->bInterval);
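
Where a converted item is still kicked from completion context with no delay, the hunk above simply swaps schedule_work() for schedule_delayed_work(..., 0) on the system workqueue. A small sketch under that assumption, with invented names (my_kit, my_notify):

#include <linux/workqueue.h>

struct my_kit {				/* illustrative stand-in for the device struct */
	struct delayed_work do_notify;
	unsigned long events;
};

static void my_notify(struct work_struct *work)
{
	struct my_kit *kit = container_of(work, struct my_kit, do_notify.work);

	/* ... report kit->events, e.g. via sysfs_notify() ... */
}

static void my_kit_init(struct my_kit *kit)
{
	INIT_DELAYED_WORK(&kit->do_notify, my_notify);
}

/* URB-completion path: zero-delay scheduling replaces the old
 * schedule_work(&kit->do_notify) once the member is a delayed_work. */
static void my_kit_kick(struct my_kit *kit)
{
	if (kit->events)
		schedule_delayed_work(&kit->do_notify, 0);
}
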
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c
index c3469b0a67c2..2bb4fa572bb7 100644
--- a/drivers/usb/misc/phidgetmotorcontrol.c
+++ b/drivers/usb/misc/phidgetmotorcontrol.c
@@ -41,7 +41,7 @@ struct motorcontrol {
41 unsigned char *data; 41 unsigned char *data;
42 dma_addr_t data_dma; 42 dma_addr_t data_dma;
43 43
44 struct work_struct do_notify; 44 struct delayed_work do_notify;
45 unsigned long input_events; 45 unsigned long input_events;
46 unsigned long speed_events; 46 unsigned long speed_events;
47 unsigned long exceed_events; 47 unsigned long exceed_events;
@@ -148,7 +148,7 @@ static void motorcontrol_irq(struct urb *urb)
148 set_bit(1, &mc->exceed_events); 148 set_bit(1, &mc->exceed_events);
149 149
150 if (mc->input_events || mc->exceed_events || mc->speed_events) 150 if (mc->input_events || mc->exceed_events || mc->speed_events)
151 schedule_work(&mc->do_notify); 151 schedule_delayed_work(&mc->do_notify, 0);
152 152
153resubmit: 153resubmit:
154 status = usb_submit_urb(urb, SLAB_ATOMIC); 154 status = usb_submit_urb(urb, SLAB_ATOMIC);
@@ -159,9 +159,10 @@ resubmit:
159 mc->udev->devpath, status); 159 mc->udev->devpath, status);
160} 160}
161 161
162static void do_notify(void *data) 162static void do_notify(struct work_struct *work)
163{ 163{
164 struct motorcontrol *mc = data; 164 struct motorcontrol *mc =
165 container_of(work, struct motorcontrol, do_notify.work);
165 int i; 166 int i;
166 char sysfs_file[8]; 167 char sysfs_file[8];
167 168
@@ -348,7 +349,7 @@ static int motorcontrol_probe(struct usb_interface *intf, const struct usb_devic
348 mc->udev = usb_get_dev(dev); 349 mc->udev = usb_get_dev(dev);
349 mc->intf = intf; 350 mc->intf = intf;
350 mc->acceleration[0] = mc->acceleration[1] = 10; 351 mc->acceleration[0] = mc->acceleration[1] = 10;
351 INIT_WORK(&mc->do_notify, do_notify, mc); 352 INIT_DELAYED_WORK(&mc->do_notify, do_notify);
352 usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data, 353 usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
353 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, 354 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
354 motorcontrol_irq, mc, endpoint->bInterval); 355 motorcontrol_irq, mc, endpoint->bInterval);
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index 7c906a43e497..fa78326d0bf0 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -222,7 +222,7 @@ struct kaweth_device
222 int suspend_lowmem_ctrl; 222 int suspend_lowmem_ctrl;
223 int linkstate; 223 int linkstate;
224 int opened; 224 int opened;
225 struct work_struct lowmem_work; 225 struct delayed_work lowmem_work;
226 226
227 struct usb_device *dev; 227 struct usb_device *dev;
228 struct net_device *net; 228 struct net_device *net;
@@ -530,9 +530,10 @@ resubmit:
530 kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); 530 kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
531} 531}
532 532
533static void kaweth_resubmit_tl(void *d) 533static void kaweth_resubmit_tl(struct work_struct *work)
534{ 534{
535 struct kaweth_device *kaweth = (struct kaweth_device *)d; 535 struct kaweth_device *kaweth =
536 container_of(work, struct kaweth_device, lowmem_work.work);
536 537
537 if (IS_BLOCKED(kaweth->status)) 538 if (IS_BLOCKED(kaweth->status))
538 return; 539 return;
@@ -1126,7 +1127,7 @@ err_fw:
1126 1127
1127 /* kaweth is zeroed as part of alloc_netdev */ 1128 /* kaweth is zeroed as part of alloc_netdev */
1128 1129
1129 INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth); 1130 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
1130 1131
1131 SET_MODULE_OWNER(netdev); 1132 SET_MODULE_OWNER(netdev);
1132 1133
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 69eb0db399df..b5690b3834e3 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -1281,9 +1281,9 @@ static inline void setup_pegasus_II(pegasus_t * pegasus)
1281static struct workqueue_struct *pegasus_workqueue = NULL; 1281static struct workqueue_struct *pegasus_workqueue = NULL;
1282#define CARRIER_CHECK_DELAY (2 * HZ) 1282#define CARRIER_CHECK_DELAY (2 * HZ)
1283 1283
1284static void check_carrier(void *data) 1284static void check_carrier(struct work_struct *work)
1285{ 1285{
1286 pegasus_t *pegasus = data; 1286 pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
1287 set_carrier(pegasus->net); 1287 set_carrier(pegasus->net);
1288 if (!(pegasus->flags & PEGASUS_UNPLUG)) { 1288 if (!(pegasus->flags & PEGASUS_UNPLUG)) {
1289 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, 1289 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
@@ -1319,7 +1319,7 @@ static int pegasus_probe(struct usb_interface *intf,
1319 1319
1320 tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); 1320 tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
1321 1321
1322 INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus); 1322 INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
1323 1323
1324 pegasus->intf = intf; 1324 pegasus->intf = intf;
1325 pegasus->usb = dev; 1325 pegasus->usb = dev;
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index 006438069b66..98f6898cae1f 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -95,7 +95,7 @@ typedef struct pegasus {
95 int dev_index; 95 int dev_index;
96 int intr_interval; 96 int intr_interval;
97 struct tasklet_struct rx_tl; 97 struct tasklet_struct rx_tl;
98 struct work_struct carrier_check; 98 struct delayed_work carrier_check;
99 struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; 99 struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb;
100 struct sk_buff *rx_pool[RX_SKBS]; 100 struct sk_buff *rx_pool[RX_SKBS];
101 struct sk_buff *rx_skb; 101 struct sk_buff *rx_skb;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 7672e11c94c4..327f97555679 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -782,9 +782,10 @@ static struct ethtool_ops usbnet_ethtool_ops = {
782 * especially now that control transfers can be queued. 782 * especially now that control transfers can be queued.
783 */ 783 */
784static void 784static void
785kevent (void *data) 785kevent (struct work_struct *work)
786{ 786{
787 struct usbnet *dev = data; 787 struct usbnet *dev =
788 container_of(work, struct usbnet, kevent);
788 int status; 789 int status;
789 790
790 /* usb_clear_halt() needs a thread context */ 791 /* usb_clear_halt() needs a thread context */
@@ -1146,7 +1147,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1146 skb_queue_head_init (&dev->done); 1147 skb_queue_head_init (&dev->done);
1147 dev->bh.func = usbnet_bh; 1148 dev->bh.func = usbnet_bh;
1148 dev->bh.data = (unsigned long) dev; 1149 dev->bh.data = (unsigned long) dev;
1149 INIT_WORK (&dev->kevent, kevent, dev); 1150 INIT_WORK (&dev->kevent, kevent);
1150 dev->delay.function = usbnet_bh; 1151 dev->delay.function = usbnet_bh;
1151 dev->delay.data = (unsigned long) dev; 1152 dev->delay.data = (unsigned long) dev;
1152 init_timer (&dev->delay); 1153 init_timer (&dev->delay);
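
Items that are never queued with a delay, like usbnet's kevent above, keep struct work_struct and plain INIT_WORK(); the only changes are the dropped data argument and a container_of() without the ".work" suffix. A sketch of that simpler case (my_net and my_kevent are illustrative names):

#include <linux/workqueue.h>

struct my_net {
	struct work_struct kevent;	/* not delayed, so it stays a work_struct */
	unsigned long flags;
};

static void my_kevent(struct work_struct *work)
{
	/* Plain work_struct: container_of() names the member directly. */
	struct my_net *dev = container_of(work, struct my_net, kevent);

	/* ... clear halted endpoints, act on dev->flags ... */
}

static void my_net_setup(struct my_net *dev)
{
	INIT_WORK(&dev->kevent, my_kevent);	/* was INIT_WORK(&dev->kevent, kevent, dev) */
	schedule_work(&dev->kevent);
}
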
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b1b5707bc99a..86bcf63b6ba5 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -92,6 +92,7 @@ struct aircable_private {
92 struct circ_buf *rx_buf; /* read buffer */ 92 struct circ_buf *rx_buf; /* read buffer */
93 int rx_flags; /* for throttilng */ 93 int rx_flags; /* for throttilng */
94 struct work_struct rx_work; /* work cue for the receiving line */ 94 struct work_struct rx_work; /* work cue for the receiving line */
95 struct usb_serial_port *port; /* USB port with which associated */
95}; 96};
96 97
97/* Private methods */ 98/* Private methods */
@@ -251,10 +252,11 @@ static void aircable_send(struct usb_serial_port *port)
251 schedule_work(&port->work); 252 schedule_work(&port->work);
252} 253}
253 254
254static void aircable_read(void *params) 255static void aircable_read(struct work_struct *work)
255{ 256{
256 struct usb_serial_port *port = params; 257 struct aircable_private *priv =
257 struct aircable_private *priv = usb_get_serial_port_data(port); 258 container_of(work, struct aircable_private, rx_work);
259 struct usb_serial_port *port = priv->port;
258 struct tty_struct *tty; 260 struct tty_struct *tty;
259 unsigned char *data; 261 unsigned char *data;
260 int count; 262 int count;
@@ -349,7 +351,8 @@ static int aircable_attach (struct usb_serial *serial)
349 } 351 }
350 352
351 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); 353 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
352 INIT_WORK(&priv->rx_work, aircable_read, port); 354 priv->port = port;
355 INIT_WORK(&priv->rx_work, aircable_read);
353 356
354 usb_set_serial_port_data(serial->port[0], priv); 357 usb_set_serial_port_data(serial->port[0], priv);
355 358
@@ -516,7 +519,7 @@ static void aircable_read_bulk_callback(struct urb *urb)
516 package_length - shift); 519 package_length - shift);
517 } 520 }
518 } 521 }
519 aircable_read(port); 522 aircable_read(&priv->rx_work);
520 } 523 }
521 524
522 /* Schedule the next read _if_ we are still open */ 525 /* Schedule the next read _if_ we are still open */
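
When the old data argument was not the structure containing the work item (aircable passed the usb_serial_port while rx_work lives in the private struct), the conversion adds a back-pointer field, and callers that used to invoke the handler directly now hand it the work member, as the aircable_read(&priv->rx_work) hunk does. A sketch of that idiom with hypothetical types (my_port stands in for usb_serial_port):

#include <linux/workqueue.h>

struct my_port;				/* opaque stand-in for usb_serial_port */

struct my_priv {
	struct work_struct rx_work;
	struct my_port *port;		/* back-pointer: replaces the old data argument */
};

static void my_rx_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, rx_work);
	struct my_port *port = priv->port;

	/* ... push buffered receive data to port's tty ... */
}

static void my_attach(struct my_priv *priv, struct my_port *port)
{
	priv->port = port;			/* stash what INIT_WORK used to carry */
	INIT_WORK(&priv->rx_work, my_rx_work);
}

/* A completion handler that used to call the worker with the port now
 * calls it with the work member instead: my_rx_work(&priv->rx_work). */
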
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 5e3ac281a2f8..83d0e21145b0 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -430,13 +430,14 @@ struct digi_port {
430 int dp_in_close; /* close in progress */ 430 int dp_in_close; /* close in progress */
431 wait_queue_head_t dp_close_wait; /* wait queue for close */ 431 wait_queue_head_t dp_close_wait; /* wait queue for close */
432 struct work_struct dp_wakeup_work; 432 struct work_struct dp_wakeup_work;
433 struct usb_serial_port *dp_port;
433}; 434};
434 435
435 436
436/* Local Function Declarations */ 437/* Local Function Declarations */
437 438
438static void digi_wakeup_write( struct usb_serial_port *port ); 439static void digi_wakeup_write( struct usb_serial_port *port );
439static void digi_wakeup_write_lock(void *); 440static void digi_wakeup_write_lock(struct work_struct *work);
440static int digi_write_oob_command( struct usb_serial_port *port, 441static int digi_write_oob_command( struct usb_serial_port *port,
441 unsigned char *buf, int count, int interruptible ); 442 unsigned char *buf, int count, int interruptible );
442static int digi_write_inb_command( struct usb_serial_port *port, 443static int digi_write_inb_command( struct usb_serial_port *port,
@@ -598,11 +599,12 @@ static inline long cond_wait_interruptible_timeout_irqrestore(
598* on writes. 599* on writes.
599*/ 600*/
600 601
601static void digi_wakeup_write_lock(void *arg) 602static void digi_wakeup_write_lock(struct work_struct *work)
602{ 603{
603 struct usb_serial_port *port = arg; 604 struct digi_port *priv =
605 container_of(work, struct digi_port, dp_wakeup_work);
606 struct usb_serial_port *port = priv->dp_port;
604 unsigned long flags; 607 unsigned long flags;
605 struct digi_port *priv = usb_get_serial_port_data(port);
606 608
607 609
608 spin_lock_irqsave( &priv->dp_port_lock, flags ); 610 spin_lock_irqsave( &priv->dp_port_lock, flags );
@@ -1702,8 +1704,8 @@ dbg( "digi_startup: TOP" );
1702 init_waitqueue_head( &priv->dp_flush_wait ); 1704 init_waitqueue_head( &priv->dp_flush_wait );
1703 priv->dp_in_close = 0; 1705 priv->dp_in_close = 0;
1704 init_waitqueue_head( &priv->dp_close_wait ); 1706 init_waitqueue_head( &priv->dp_close_wait );
1705 INIT_WORK(&priv->dp_wakeup_work, 1707 INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
1706 digi_wakeup_write_lock, serial->port[i]); 1708 priv->dp_port = serial->port[i];
1707 1709
1708 /* initialize write wait queue for this port */ 1710 /* initialize write wait queue for this port */
1709 init_waitqueue_head( &serial->port[i]->write_wait ); 1711 init_waitqueue_head( &serial->port[i]->write_wait );
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89ce2775be15..72e4d48f51e9 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -559,7 +559,8 @@ struct ftdi_private {
559 char prev_status, diff_status; /* Used for TIOCMIWAIT */ 559 char prev_status, diff_status; /* Used for TIOCMIWAIT */
560 __u8 rx_flags; /* receive state flags (throttling) */ 560 __u8 rx_flags; /* receive state flags (throttling) */
561 spinlock_t rx_lock; /* spinlock for receive state */ 561 spinlock_t rx_lock; /* spinlock for receive state */
562 struct work_struct rx_work; 562 struct delayed_work rx_work;
563 struct usb_serial_port *port;
563 int rx_processed; 564 int rx_processed;
564 unsigned long rx_bytes; 565 unsigned long rx_bytes;
565 566
@@ -593,7 +594,7 @@ static int ftdi_write_room (struct usb_serial_port *port);
593static int ftdi_chars_in_buffer (struct usb_serial_port *port); 594static int ftdi_chars_in_buffer (struct usb_serial_port *port);
594static void ftdi_write_bulk_callback (struct urb *urb); 595static void ftdi_write_bulk_callback (struct urb *urb);
595static void ftdi_read_bulk_callback (struct urb *urb); 596static void ftdi_read_bulk_callback (struct urb *urb);
596static void ftdi_process_read (void *param); 597static void ftdi_process_read (struct work_struct *work);
597static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); 598static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old);
598static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); 599static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file);
599static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); 600static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
@@ -1201,7 +1202,8 @@ static int ftdi_sio_attach (struct usb_serial *serial)
1201 port->read_urb->transfer_buffer_length = BUFSZ; 1202 port->read_urb->transfer_buffer_length = BUFSZ;
1202 } 1203 }
1203 1204
1204 INIT_WORK(&priv->rx_work, ftdi_process_read, port); 1205 INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
1206 priv->port = port;
1205 1207
1206 /* Free port's existing write urb and transfer buffer. */ 1208 /* Free port's existing write urb and transfer buffer. */
1207 if (port->write_urb) { 1209 if (port->write_urb) {
@@ -1640,17 +1642,18 @@ static void ftdi_read_bulk_callback (struct urb *urb)
1640 priv->rx_bytes += countread; 1642 priv->rx_bytes += countread;
1641 spin_unlock_irqrestore(&priv->rx_lock, flags); 1643 spin_unlock_irqrestore(&priv->rx_lock, flags);
1642 1644
1643 ftdi_process_read(port); 1645 ftdi_process_read(&priv->rx_work.work);
1644 1646
1645} /* ftdi_read_bulk_callback */ 1647} /* ftdi_read_bulk_callback */
1646 1648
1647 1649
1648static void ftdi_process_read (void *param) 1650static void ftdi_process_read (struct work_struct *work)
1649{ /* ftdi_process_read */ 1651{ /* ftdi_process_read */
1650 struct usb_serial_port *port = (struct usb_serial_port*)param; 1652 struct ftdi_private *priv =
1653 container_of(work, struct ftdi_private, rx_work.work);
1654 struct usb_serial_port *port = priv->port;
1651 struct urb *urb; 1655 struct urb *urb;
1652 struct tty_struct *tty; 1656 struct tty_struct *tty;
1653 struct ftdi_private *priv;
1654 char error_flag; 1657 char error_flag;
1655 unsigned char *data; 1658 unsigned char *data;
1656 1659
@@ -2179,7 +2182,7 @@ static void ftdi_unthrottle (struct usb_serial_port *port)
2179 spin_unlock_irqrestore(&priv->rx_lock, flags); 2182 spin_unlock_irqrestore(&priv->rx_lock, flags);
2180 2183
2181 if (actually_throttled) 2184 if (actually_throttled)
2182 schedule_work(&priv->rx_work); 2185 schedule_delayed_work(&priv->rx_work, 0);
2183} 2186}
2184 2187
2185static int __init ftdi_init (void) 2188static int __init ftdi_init (void)
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 909005107ea2..e09a0bfe6231 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -120,6 +120,8 @@ struct keyspan_pda_private {
120 int tx_throttled; 120 int tx_throttled;
121 struct work_struct wakeup_work; 121 struct work_struct wakeup_work;
122 struct work_struct unthrottle_work; 122 struct work_struct unthrottle_work;
123 struct usb_serial *serial;
124 struct usb_serial_port *port;
123}; 125};
124 126
125 127
@@ -175,9 +177,11 @@ static struct usb_device_id id_table_fake_xircom [] = {
175}; 177};
176#endif 178#endif
177 179
178static void keyspan_pda_wakeup_write( struct usb_serial_port *port ) 180static void keyspan_pda_wakeup_write(struct work_struct *work)
179{ 181{
180 182 struct keyspan_pda_private *priv =
183 container_of(work, struct keyspan_pda_private, wakeup_work);
184 struct usb_serial_port *port = priv->port;
181 struct tty_struct *tty = port->tty; 185 struct tty_struct *tty = port->tty;
182 186
183 /* wake up port processes */ 187 /* wake up port processes */
@@ -187,8 +191,11 @@ static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
187 tty_wakeup(tty); 191 tty_wakeup(tty);
188} 192}
189 193
190static void keyspan_pda_request_unthrottle( struct usb_serial *serial ) 194static void keyspan_pda_request_unthrottle(struct work_struct *work)
191{ 195{
196 struct keyspan_pda_private *priv =
197 container_of(work, struct keyspan_pda_private, unthrottle_work);
198 struct usb_serial *serial = priv->serial;
192 int result; 199 int result;
193 200
194 dbg(" request_unthrottle"); 201 dbg(" request_unthrottle");
@@ -765,11 +772,10 @@ static int keyspan_pda_startup (struct usb_serial *serial)
765 return (1); /* error */ 772 return (1); /* error */
766 usb_set_serial_port_data(serial->port[0], priv); 773 usb_set_serial_port_data(serial->port[0], priv);
767 init_waitqueue_head(&serial->port[0]->write_wait); 774 init_waitqueue_head(&serial->port[0]->write_wait);
768 INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write, 775 INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
769 (void *)(serial->port[0])); 776 INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
770 INIT_WORK(&priv->unthrottle_work, 777 priv->serial = serial;
771 (void *)keyspan_pda_request_unthrottle, 778 priv->port = serial->port[0];
772 (void *)(serial));
773 return (0); 779 return (0);
774} 780}
775 781
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index c1257d5292f5..3d5072f14b8d 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -533,9 +533,10 @@ void usb_serial_port_softint(struct usb_serial_port *port)
533 schedule_work(&port->work); 533 schedule_work(&port->work);
534} 534}
535 535
536static void usb_serial_port_work(void *private) 536static void usb_serial_port_work(struct work_struct *work)
537{ 537{
538 struct usb_serial_port *port = private; 538 struct usb_serial_port *port =
539 container_of(work, struct usb_serial_port, work);
539 struct tty_struct *tty; 540 struct tty_struct *tty;
540 541
541 dbg("%s - port %d", __FUNCTION__, port->number); 542 dbg("%s - port %d", __FUNCTION__, port->number);
@@ -799,7 +800,7 @@ int usb_serial_probe(struct usb_interface *interface,
799 port->serial = serial; 800 port->serial = serial;
800 spin_lock_init(&port->lock); 801 spin_lock_init(&port->lock);
801 mutex_init(&port->mutex); 802 mutex_init(&port->mutex);
802 INIT_WORK(&port->work, usb_serial_port_work, port); 803 INIT_WORK(&port->work, usb_serial_port_work);
803 serial->port[i] = port; 804 serial->port[i] = port;
804 } 805 }
805 806
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 4d1cd7aeccd3..154c7d290597 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -227,6 +227,7 @@ struct whiteheat_private {
227 struct list_head rx_urbs_submitted; 227 struct list_head rx_urbs_submitted;
228 struct list_head rx_urb_q; 228 struct list_head rx_urb_q;
229 struct work_struct rx_work; 229 struct work_struct rx_work;
230 struct usb_serial_port *port;
230 struct list_head tx_urbs_free; 231 struct list_head tx_urbs_free;
231 struct list_head tx_urbs_submitted; 232 struct list_head tx_urbs_submitted;
232}; 233};
@@ -241,7 +242,7 @@ static void command_port_read_callback(struct urb *urb);
241static int start_port_read(struct usb_serial_port *port); 242static int start_port_read(struct usb_serial_port *port);
242static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head); 243static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head);
243static struct list_head *list_first(struct list_head *head); 244static struct list_head *list_first(struct list_head *head);
244static void rx_data_softint(void *private); 245static void rx_data_softint(struct work_struct *work);
245 246
246static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); 247static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize);
247static int firm_open(struct usb_serial_port *port); 248static int firm_open(struct usb_serial_port *port);
@@ -424,7 +425,8 @@ static int whiteheat_attach (struct usb_serial *serial)
424 spin_lock_init(&info->lock); 425 spin_lock_init(&info->lock);
425 info->flags = 0; 426 info->flags = 0;
426 info->mcr = 0; 427 info->mcr = 0;
427 INIT_WORK(&info->rx_work, rx_data_softint, port); 428 INIT_WORK(&info->rx_work, rx_data_softint);
429 info->port = port;
428 430
429 INIT_LIST_HEAD(&info->rx_urbs_free); 431 INIT_LIST_HEAD(&info->rx_urbs_free);
430 INIT_LIST_HEAD(&info->rx_urbs_submitted); 432 INIT_LIST_HEAD(&info->rx_urbs_submitted);
@@ -949,7 +951,7 @@ static void whiteheat_unthrottle (struct usb_serial_port *port)
949 spin_unlock_irqrestore(&info->lock, flags); 951 spin_unlock_irqrestore(&info->lock, flags);
950 952
951 if (actually_throttled) 953 if (actually_throttled)
952 rx_data_softint(port); 954 rx_data_softint(&info->rx_work);
953 955
954 return; 956 return;
955} 957}
@@ -1400,10 +1402,11 @@ static struct list_head *list_first(struct list_head *head)
1400} 1402}
1401 1403
1402 1404
1403static void rx_data_softint(void *private) 1405static void rx_data_softint(struct work_struct *work)
1404{ 1406{
1405 struct usb_serial_port *port = (struct usb_serial_port *)private; 1407 struct whiteheat_private *info =
1406 struct whiteheat_private *info = usb_get_serial_port_data(port); 1408 container_of(work, struct whiteheat_private, rx_work);
1409 struct usb_serial_port *port = info->port;
1407 struct tty_struct *tty = port->tty; 1410 struct tty_struct *tty = port->tty;
1408 struct whiteheat_urb_wrap *wrap; 1411 struct whiteheat_urb_wrap *wrap;
1409 struct urb *urb; 1412 struct urb *urb;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 302174b8e477..31f476a64790 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -383,9 +383,9 @@ static void fbcon_update_softback(struct vc_data *vc)
383 softback_top = 0; 383 softback_top = 0;
384} 384}
385 385
386static void fb_flashcursor(void *private) 386static void fb_flashcursor(struct work_struct *work)
387{ 387{
388 struct fb_info *info = private; 388 struct fb_info *info = container_of(work, struct fb_info, queue);
389 struct fbcon_ops *ops = info->fbcon_par; 389 struct fbcon_ops *ops = info->fbcon_par;
390 struct display *p; 390 struct display *p;
391 struct vc_data *vc = NULL; 391 struct vc_data *vc = NULL;
@@ -442,7 +442,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
442 if ((!info->queue.func || info->queue.func == fb_flashcursor) && 442 if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
443 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { 443 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
444 if (!info->queue.func) 444 if (!info->queue.func)
445 INIT_WORK(&info->queue, fb_flashcursor, info); 445 INIT_WORK(&info->queue, fb_flashcursor);
446 446
447 init_timer(&ops->cursor_timer); 447 init_timer(&ops->cursor_timer);
448 ops->cursor_timer.function = cursor_timer_handler; 448 ops->cursor_timer.function = cursor_timer_handler;
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 90a79c784549..944273c3dbff 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -110,8 +110,8 @@ struct v9fs_mux_rpc {
110}; 110};
111 111
112static int v9fs_poll_proc(void *); 112static int v9fs_poll_proc(void *);
113static void v9fs_read_work(void *); 113static void v9fs_read_work(struct work_struct *work);
114static void v9fs_write_work(void *); 114static void v9fs_write_work(struct work_struct *work);
115static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, 115static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
116 poll_table * p); 116 poll_table * p);
117static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); 117static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
297 m->rbuf = NULL; 297 m->rbuf = NULL;
298 m->wpos = m->wsize = 0; 298 m->wpos = m->wsize = 0;
299 m->wbuf = NULL; 299 m->wbuf = NULL;
300 INIT_WORK(&m->rq, v9fs_read_work, m); 300 INIT_WORK(&m->rq, v9fs_read_work);
301 INIT_WORK(&m->wq, v9fs_write_work, m); 301 INIT_WORK(&m->wq, v9fs_write_work);
302 m->wsched = 0; 302 m->wsched = 0;
303 memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); 303 memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
304 m->poll_task = NULL; 304 m->poll_task = NULL;
@@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a)
458/** 458/**
459 * v9fs_write_work - called when a transport can send some data 459 * v9fs_write_work - called when a transport can send some data
460 */ 460 */
461static void v9fs_write_work(void *a) 461static void v9fs_write_work(struct work_struct *work)
462{ 462{
463 int n, err; 463 int n, err;
464 struct v9fs_mux_data *m; 464 struct v9fs_mux_data *m;
465 struct v9fs_req *req; 465 struct v9fs_req *req;
466 466
467 m = a; 467 m = container_of(work, struct v9fs_mux_data, wq);
468 468
469 if (m->err < 0) { 469 if (m->err < 0) {
470 clear_bit(Wworksched, &m->wsched); 470 clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
564/** 564/**
565 * v9fs_read_work - called when there is some data to be read from a transport 565 * v9fs_read_work - called when there is some data to be read from a transport
566 */ 566 */
567static void v9fs_read_work(void *a) 567static void v9fs_read_work(struct work_struct *work)
568{ 568{
569 int n, err; 569 int n, err;
570 struct v9fs_mux_data *m; 570 struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@ static void v9fs_read_work(void *a)
572 struct v9fs_fcall *rcall; 572 struct v9fs_fcall *rcall;
573 char *rbuf; 573 char *rbuf;
574 574
575 m = a; 575 m = container_of(work, struct v9fs_mux_data, rq);
576 576
577 if (m->err < 0) 577 if (m->err < 0)
578 return; 578 return;
diff --git a/fs/aio.c b/fs/aio.c
index 277a5f2d18ad..287a1bc7a182 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,13 +53,13 @@ static kmem_cache_t *kioctx_cachep;
53static struct workqueue_struct *aio_wq; 53static struct workqueue_struct *aio_wq;
54 54
55/* Used for rare fput completion. */ 55/* Used for rare fput completion. */
56static void aio_fput_routine(void *); 56static void aio_fput_routine(struct work_struct *);
57static DECLARE_WORK(fput_work, aio_fput_routine, NULL); 57static DECLARE_WORK(fput_work, aio_fput_routine);
58 58
59static DEFINE_SPINLOCK(fput_lock); 59static DEFINE_SPINLOCK(fput_lock);
60static LIST_HEAD(fput_head); 60static LIST_HEAD(fput_head);
61 61
62static void aio_kick_handler(void *); 62static void aio_kick_handler(struct work_struct *);
63static void aio_queue_work(struct kioctx *); 63static void aio_queue_work(struct kioctx *);
64 64
65/* aio_setup 65/* aio_setup
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
227 227
228 INIT_LIST_HEAD(&ctx->active_reqs); 228 INIT_LIST_HEAD(&ctx->active_reqs);
229 INIT_LIST_HEAD(&ctx->run_list); 229 INIT_LIST_HEAD(&ctx->run_list);
230 INIT_WORK(&ctx->wq, aio_kick_handler, ctx); 230 INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
231 231
232 if (aio_setup_ring(ctx) < 0) 232 if (aio_setup_ring(ctx) < 0)
233 goto out_freectx; 233 goto out_freectx;
@@ -469,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
469 wake_up(&ctx->wait); 469 wake_up(&ctx->wait);
470} 470}
471 471
472static void aio_fput_routine(void *data) 472static void aio_fput_routine(struct work_struct *data)
473{ 473{
474 spin_lock_irq(&fput_lock); 474 spin_lock_irq(&fput_lock);
475 while (likely(!list_empty(&fput_head))) { 475 while (likely(!list_empty(&fput_head))) {
@@ -857,9 +857,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx)
857 * space. 857 * space.
858 * Run on aiod's context. 858 * Run on aiod's context.
859 */ 859 */
860static void aio_kick_handler(void *data) 860static void aio_kick_handler(struct work_struct *work)
861{ 861{
862 struct kioctx *ctx = data; 862 struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
863 mm_segment_t oldfs = get_fs(); 863 mm_segment_t oldfs = get_fs();
864 int requeue; 864 int requeue;
865 865
@@ -874,7 +874,7 @@ static void aio_kick_handler(void *data)
874 * we're in a worker thread already, don't use queue_delayed_work, 874 * we're in a worker thread already, don't use queue_delayed_work,
875 */ 875 */
876 if (requeue) 876 if (requeue)
877 queue_work(aio_wq, &ctx->wq); 877 queue_delayed_work(aio_wq, &ctx->wq, 0);
878} 878}
879 879
880 880
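
Statically declared work items lose their data argument as well: DECLARE_WORK(name, fn) and DECLARE_DELAYED_WORK(name, fn) now take only the handler, so callbacks that were declared with a NULL argument (fput_work above, bio_dirty_work and laundromat_work below) either ignore the work pointer or reach for file-scope state, and a handler that wants to requeue itself immediately uses a zero delay, as aio_kick_handler does above. A compact sketch with made-up names (my_fput_work, pending):

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static LIST_HEAD(pending);			/* file-scope state replaces the old NULL data arg */
static DEFINE_SPINLOCK(pending_lock);

static void my_fput_work(struct work_struct *unused);
static DECLARE_WORK(my_fput_item, my_fput_work);	/* two arguments now, not three */

static void my_fput_work(struct work_struct *unused)
{
	spin_lock_irq(&pending_lock);
	/* ... drain the "pending" list ... */
	spin_unlock_irq(&pending_lock);
}

/* From atomic context, deferral itself is unchanged: */
static void my_defer(void)
{
	schedule_work(&my_fput_item);
}
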
diff --git a/fs/bio.c b/fs/bio.c
index aa4d09bd4e71..50c40ce2cead 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -940,16 +940,16 @@ static void bio_release_pages(struct bio *bio)
940 * run one bio_put() against the BIO. 940 * run one bio_put() against the BIO.
941 */ 941 */
942 942
943static void bio_dirty_fn(void *data); 943static void bio_dirty_fn(struct work_struct *work);
944 944
945static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); 945static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
946static DEFINE_SPINLOCK(bio_dirty_lock); 946static DEFINE_SPINLOCK(bio_dirty_lock);
947static struct bio *bio_dirty_list; 947static struct bio *bio_dirty_list;
948 948
949/* 949/*
950 * This runs in process context 950 * This runs in process context
951 */ 951 */
952static void bio_dirty_fn(void *data) 952static void bio_dirty_fn(struct work_struct *work)
953{ 953{
954 unsigned long flags; 954 unsigned long flags;
955 struct bio *bio; 955 struct bio *bio;
diff --git a/fs/file.c b/fs/file.c
index 8e81775c5dc8..3787e82f54c1 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -91,8 +91,10 @@ out:
91 spin_unlock(&fddef->lock); 91 spin_unlock(&fddef->lock);
92} 92}
93 93
94static void free_fdtable_work(struct fdtable_defer *f) 94static void free_fdtable_work(struct work_struct *work)
95{ 95{
96 struct fdtable_defer *f =
97 container_of(work, struct fdtable_defer, wq);
96 struct fdtable *fdt; 98 struct fdtable *fdt;
97 99
98 spin_lock_bh(&f->lock); 100 spin_lock_bh(&f->lock);
@@ -351,7 +353,7 @@ static void __devinit fdtable_defer_list_init(int cpu)
351{ 353{
352 struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); 354 struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
353 spin_lock_init(&fddef->lock); 355 spin_lock_init(&fddef->lock);
354 INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef); 356 INIT_WORK(&fddef->wq, free_fdtable_work);
355 init_timer(&fddef->timer); 357 init_timer(&fddef->timer);
356 fddef->timer.data = (unsigned long)fddef; 358 fddef->timer.data = (unsigned long)fddef;
357 fddef->timer.function = fdtable_timer; 359 fddef->timer.function = fdtable_timer;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 78fe0fae23ff..55f5333dae99 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,7 +35,7 @@
35 35
36struct greedy { 36struct greedy {
37 struct gfs2_holder gr_gh; 37 struct gfs2_holder gr_gh;
38 struct work_struct gr_work; 38 struct delayed_work gr_work;
39}; 39};
40 40
41struct gfs2_gl_hash_bucket { 41struct gfs2_gl_hash_bucket {
@@ -1368,9 +1368,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
1368 glops->go_xmote_th(gl, state, flags); 1368 glops->go_xmote_th(gl, state, flags);
1369} 1369}
1370 1370
1371static void greedy_work(void *data) 1371static void greedy_work(struct work_struct *work)
1372{ 1372{
1373 struct greedy *gr = data; 1373 struct greedy *gr = container_of(work, struct greedy, gr_work.work);
1374 struct gfs2_holder *gh = &gr->gr_gh; 1374 struct gfs2_holder *gh = &gr->gr_gh;
1375 struct gfs2_glock *gl = gh->gh_gl; 1375 struct gfs2_glock *gl = gh->gh_gl;
1376 const struct gfs2_glock_operations *glops = gl->gl_ops; 1376 const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1422,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1422 1422
1423 gfs2_holder_init(gl, 0, 0, gh); 1423 gfs2_holder_init(gl, 0, 0, gh);
1424 set_bit(HIF_GREEDY, &gh->gh_iflags); 1424 set_bit(HIF_GREEDY, &gh->gh_iflags);
1425 INIT_WORK(&gr->gr_work, greedy_work, gr); 1425 INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
1426 1426
1427 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); 1427 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1428 schedule_delayed_work(&gr->gr_work, time); 1428 schedule_delayed_work(&gr->gr_work, time);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 42e3bef270c9..72dad552aa00 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
577 server->rcv.ptr = (unsigned char*)&server->rcv.buf; 577 server->rcv.ptr = (unsigned char*)&server->rcv.buf;
578 server->rcv.len = 10; 578 server->rcv.len = 10;
579 server->rcv.state = 0; 579 server->rcv.state = 0;
580 INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server); 580 INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
581 INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server); 581 INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
582 sock->sk->sk_write_space = ncp_tcp_write_space; 582 sock->sk->sk_write_space = ncp_tcp_write_space;
583 } else { 583 } else {
584 INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server); 584 INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
585 INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server); 585 INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
586 server->timeout_tm.data = (unsigned long)server; 586 server->timeout_tm.data = (unsigned long)server;
587 server->timeout_tm.function = ncpdgram_timeout_call; 587 server->timeout_tm.function = ncpdgram_timeout_call;
588 } 588 }
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 11c2b252ebed..e496d8b65e92 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void *
350 } 350 }
351} 351}
352 352
353void ncpdgram_rcv_proc(void *s) 353void ncpdgram_rcv_proc(struct work_struct *work)
354{ 354{
355 struct ncp_server *server = s; 355 struct ncp_server *server =
356 container_of(work, struct ncp_server, rcv.tq);
356 struct socket* sock; 357 struct socket* sock;
357 358
358 sock = server->ncp_sock; 359 sock = server->ncp_sock;
@@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server)
468 } 469 }
469} 470}
470 471
471void ncpdgram_timeout_proc(void *s) 472void ncpdgram_timeout_proc(struct work_struct *work)
472{ 473{
473 struct ncp_server *server = s; 474 struct ncp_server *server =
475 container_of(work, struct ncp_server, timeout_tq);
474 mutex_lock(&server->rcv.creq_mutex); 476 mutex_lock(&server->rcv.creq_mutex);
475 __ncpdgram_timeout_proc(server); 477 __ncpdgram_timeout_proc(server);
476 mutex_unlock(&server->rcv.creq_mutex); 478 mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@ skipdata:;
652 } 654 }
653} 655}
654 656
655void ncp_tcp_rcv_proc(void *s) 657void ncp_tcp_rcv_proc(struct work_struct *work)
656{ 658{
657 struct ncp_server *server = s; 659 struct ncp_server *server =
660 container_of(work, struct ncp_server, rcv.tq);
658 661
659 mutex_lock(&server->rcv.creq_mutex); 662 mutex_lock(&server->rcv.creq_mutex);
660 __ncptcp_rcv_proc(server); 663 __ncptcp_rcv_proc(server);
661 mutex_unlock(&server->rcv.creq_mutex); 664 mutex_unlock(&server->rcv.creq_mutex);
662} 665}
663 666
664void ncp_tcp_tx_proc(void *s) 667void ncp_tcp_tx_proc(struct work_struct *work)
665{ 668{
666 struct ncp_server *server = s; 669 struct ncp_server *server =
670 container_of(work, struct ncp_server, tx.tq);
667 671
668 mutex_lock(&server->rcv.creq_mutex); 672 mutex_lock(&server->rcv.creq_mutex);
669 __ncptcp_try_send(server); 673 __ncptcp_try_send(server);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5fea638743e4..23ab145daa2d 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
143 INIT_LIST_HEAD(&clp->cl_state_owners); 143 INIT_LIST_HEAD(&clp->cl_state_owners);
144 INIT_LIST_HEAD(&clp->cl_unused); 144 INIT_LIST_HEAD(&clp->cl_unused);
145 spin_lock_init(&clp->cl_lock); 145 spin_lock_init(&clp->cl_lock);
146 INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); 146 INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
147 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); 147 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
148 clp->cl_boot_time = CURRENT_TIME; 148 clp->cl_boot_time = CURRENT_TIME;
149 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; 149 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index ec1114b33d89..371b804e7cc8 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,10 +18,10 @@
18 18
19#define NFSDBG_FACILITY NFSDBG_VFS 19#define NFSDBG_FACILITY NFSDBG_VFS
20 20
21static void nfs_expire_automounts(void *list); 21static void nfs_expire_automounts(struct work_struct *work);
22 22
23LIST_HEAD(nfs_automount_list); 23LIST_HEAD(nfs_automount_list);
24static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list); 24static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
25int nfs_mountpoint_expiry_timeout = 500 * HZ; 25int nfs_mountpoint_expiry_timeout = 500 * HZ;
26 26
27static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, 27static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = {
164 .follow_link = nfs_follow_mountpoint, 164 .follow_link = nfs_follow_mountpoint,
165}; 165};
166 166
167static void nfs_expire_automounts(void *data) 167static void nfs_expire_automounts(struct work_struct *work)
168{ 168{
169 struct list_head *list = (struct list_head *)data; 169 struct list_head *list = &nfs_automount_list;
170 170
171 mark_mounts_for_expiry(list); 171 mark_mounts_for_expiry(list);
172 if (!list_empty(list)) 172 if (!list_empty(list))
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f346677332d..c26cd978c7cc 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2];
185extern void nfs4_schedule_state_renewal(struct nfs_client *); 185extern void nfs4_schedule_state_renewal(struct nfs_client *);
186extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); 186extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
187extern void nfs4_kill_renewd(struct nfs_client *); 187extern void nfs4_kill_renewd(struct nfs_client *);
188extern void nfs4_renew_state(void *); 188extern void nfs4_renew_state(struct work_struct *);
189 189
190/* nfs4state.c */ 190/* nfs4state.c */
191struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); 191struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df1852e75..823298561c0a 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
59#define NFSDBG_FACILITY NFSDBG_PROC 59#define NFSDBG_FACILITY NFSDBG_PROC
60 60
61void 61void
62nfs4_renew_state(void *data) 62nfs4_renew_state(struct work_struct *work)
63{ 63{
64 struct nfs_client *clp = (struct nfs_client *)data; 64 struct nfs_client *clp =
65 container_of(work, struct nfs_client, cl_renewd.work);
65 struct rpc_cred *cred; 66 struct rpc_cred *cred;
66 long lease, timeout; 67 long lease, timeout;
67 unsigned long last, now; 68 unsigned long last, now;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 293b6495829f..e431e93ab503 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1829,9 +1829,8 @@ out:
1829} 1829}
1830 1830
1831static struct workqueue_struct *laundry_wq; 1831static struct workqueue_struct *laundry_wq;
1832static struct work_struct laundromat_work; 1832static void laundromat_main(struct work_struct *);
1833static void laundromat_main(void *); 1833static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
1834static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
1835 1834
1836__be32 1835__be32
1837nfsd4_renew(clientid_t *clid) 1836nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@ nfs4_laundromat(void)
1940} 1939}
1941 1940
1942void 1941void
1943laundromat_main(void *not_used) 1942laundromat_main(struct work_struct *not_used)
1944{ 1943{
1945 time_t t; 1944 time_t t;
1946 1945
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f43bc5f18a35..0b2ad163005e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1205,10 +1205,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
1205 return status; 1205 return status;
1206} 1206}
1207 1207
1208static void ocfs2_truncate_log_worker(void *data) 1208static void ocfs2_truncate_log_worker(struct work_struct *work)
1209{ 1209{
1210 int status; 1210 int status;
1211 struct ocfs2_super *osb = data; 1211 struct ocfs2_super *osb =
1212 container_of(work, struct ocfs2_super,
1213 osb_truncate_log_wq.work);
1212 1214
1213 mlog_entry_void(); 1215 mlog_entry_void();
1214 1216
@@ -1441,7 +1443,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
1441 /* ocfs2_truncate_log_shutdown keys on the existence of 1443 /* ocfs2_truncate_log_shutdown keys on the existence of
1442 * osb->osb_tl_inode so we don't set any of the osb variables 1444 * osb->osb_tl_inode so we don't set any of the osb variables
1443 * until we're sure all is well. */ 1445 * until we're sure all is well. */
1444 INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb); 1446 INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
1447 ocfs2_truncate_log_worker);
1445 osb->osb_tl_bh = tl_bh; 1448 osb->osb_tl_bh = tl_bh;
1446 osb->osb_tl_inode = tl_inode; 1449 osb->osb_tl_inode = tl_inode;
1447 1450
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 305cba3681fe..4cd9a9580456 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -141,7 +141,7 @@ struct o2hb_region {
141 * recognizes a node going up and down in one iteration */ 141 * recognizes a node going up and down in one iteration */
142 u64 hr_generation; 142 u64 hr_generation;
143 143
144 struct work_struct hr_write_timeout_work; 144 struct delayed_work hr_write_timeout_work;
145 unsigned long hr_last_timeout_start; 145 unsigned long hr_last_timeout_start;
146 146
147 /* Used during o2hb_check_slot to hold a copy of the block 147 /* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt {
156 int wc_error; 156 int wc_error;
157}; 157};
158 158
159static void o2hb_write_timeout(void *arg) 159static void o2hb_write_timeout(struct work_struct *work)
160{ 160{
161 struct o2hb_region *reg = arg; 161 struct o2hb_region *reg =
162 container_of(work, struct o2hb_region,
163 hr_write_timeout_work.work);
162 164
163 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " 165 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
164 "milliseconds\n", reg->hr_dev_name, 166 "milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1404 goto out; 1406 goto out;
1405 } 1407 }
1406 1408
1407 INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg); 1409 INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
1408 1410
1409 /* 1411 /*
1410 * A node is considered live after it has beat LIVE_THRESHOLD 1412 * A node is considered live after it has beat LIVE_THRESHOLD
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 7bba98fbfc15..4705d659fe57 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -88,7 +88,7 @@ void o2quo_disk_timeout(void)
88 o2quo_fence_self(); 88 o2quo_fence_self();
89} 89}
90 90
91static void o2quo_make_decision(void *arg) 91static void o2quo_make_decision(struct work_struct *work)
92{ 92{
93 int quorum; 93 int quorum;
94 int lowest_hb, lowest_reachable = 0, fence = 0; 94 int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@ void o2quo_init(void)
306 struct o2quo_state *qs = &o2quo_state; 306 struct o2quo_state *qs = &o2quo_state;
307 307
308 spin_lock_init(&qs->qs_lock); 308 spin_lock_init(&qs->qs_lock);
309 INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL); 309 INIT_WORK(&qs->qs_work, o2quo_make_decision);
310} 310}
311 311
312void o2quo_exit(void) 312void o2quo_exit(void)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b650efa8c8be..9b3209dc0b16 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] =
140 [O2NET_ERR_DIED] = -EHOSTDOWN,}; 140 [O2NET_ERR_DIED] = -EHOSTDOWN,};
141 141
142/* can't quite avoid *all* internal declarations :/ */ 142/* can't quite avoid *all* internal declarations :/ */
143static void o2net_sc_connect_completed(void *arg); 143static void o2net_sc_connect_completed(struct work_struct *work);
144static void o2net_rx_until_empty(void *arg); 144static void o2net_rx_until_empty(struct work_struct *work);
145static void o2net_shutdown_sc(void *arg); 145static void o2net_shutdown_sc(struct work_struct *work);
146static void o2net_listen_data_ready(struct sock *sk, int bytes); 146static void o2net_listen_data_ready(struct sock *sk, int bytes);
147static void o2net_sc_send_keep_req(void *arg); 147static void o2net_sc_send_keep_req(struct work_struct *work);
148static void o2net_idle_timer(unsigned long data); 148static void o2net_idle_timer(unsigned long data);
149static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); 149static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
150 150
@@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
308 o2nm_node_get(node); 308 o2nm_node_get(node);
309 sc->sc_node = node; 309 sc->sc_node = node;
310 310
311 INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc); 311 INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
312 INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc); 312 INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
313 INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc); 313 INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
314 INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc); 314 INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
315 315
316 init_timer(&sc->sc_idle_timeout); 316 init_timer(&sc->sc_idle_timeout);
317 sc->sc_idle_timeout.function = o2net_idle_timer; 317 sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc,
342 sc_put(sc); 342 sc_put(sc);
343} 343}
344static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, 344static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
345 struct work_struct *work, 345 struct delayed_work *work,
346 int delay) 346 int delay)
347{ 347{
348 sc_get(sc); 348 sc_get(sc);
@@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
350 sc_put(sc); 350 sc_put(sc);
351} 351}
352static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, 352static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
353 struct work_struct *work) 353 struct delayed_work *work)
354{ 354{
355 if (cancel_delayed_work(work)) 355 if (cancel_delayed_work(work))
356 sc_put(sc); 356 sc_put(sc);
@@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn,
564 * ourselves as state_change couldn't get the nn_lock and call set_nn_state 564 * ourselves as state_change couldn't get the nn_lock and call set_nn_state
565 * itself. 565 * itself.
566 */ 566 */
567static void o2net_shutdown_sc(void *arg) 567static void o2net_shutdown_sc(struct work_struct *work)
568{ 568{
569 struct o2net_sock_container *sc = arg; 569 struct o2net_sock_container *sc =
570 container_of(work, struct o2net_sock_container,
571 sc_shutdown_work);
570 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 572 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
571 573
572 sclog(sc, "shutting down\n"); 574 sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@ out:
1201/* this work func is triggerd by data ready. it reads until it can read no 1203/* this work func is triggerd by data ready. it reads until it can read no
1202 * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing 1204 * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
1203 * our work the work struct will be marked and we'll be called again. */ 1205 * our work the work struct will be marked and we'll be called again. */
1204static void o2net_rx_until_empty(void *arg) 1206static void o2net_rx_until_empty(struct work_struct *work)
1205{ 1207{
1206 struct o2net_sock_container *sc = arg; 1208 struct o2net_sock_container *sc =
1209 container_of(work, struct o2net_sock_container, sc_rx_work);
1207 int ret; 1210 int ret;
1208 1211
1209 do { 1212 do {
@@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock)
1249 1252
1250/* called when a connect completes and after a sock is accepted. the 1253/* called when a connect completes and after a sock is accepted. the
1251 * rx path will see the response and mark the sc valid */ 1254 * rx path will see the response and mark the sc valid */
1252static void o2net_sc_connect_completed(void *arg) 1255static void o2net_sc_connect_completed(struct work_struct *work)
1253{ 1256{
1254 struct o2net_sock_container *sc = arg; 1257 struct o2net_sock_container *sc =
1258 container_of(work, struct o2net_sock_container,
1259 sc_connect_work);
1255 1260
1256 mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", 1261 mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
1257 (unsigned long long)O2NET_PROTOCOL_VERSION, 1262 (unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg)
1262} 1267}
1263 1268
1264/* this is called as a work_struct func. */ 1269/* this is called as a work_struct func. */
1265static void o2net_sc_send_keep_req(void *arg) 1270static void o2net_sc_send_keep_req(struct work_struct *work)
1266{ 1271{
1267 struct o2net_sock_container *sc = arg; 1272 struct o2net_sock_container *sc =
1273 container_of(work, struct o2net_sock_container,
1274 sc_keepalive_work.work);
1268 1275
1269 o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); 1276 o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
1270 sc_put(sc); 1277 sc_put(sc);
@@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
1314 * having a connect attempt fail, etc. This centralizes the logic which decides 1321 * having a connect attempt fail, etc. This centralizes the logic which decides
1315 * if a connect attempt should be made or if we should give up and all future 1322 * if a connect attempt should be made or if we should give up and all future
1316 * transmit attempts should fail */ 1323 * transmit attempts should fail */
1317static void o2net_start_connect(void *arg) 1324static void o2net_start_connect(struct work_struct *work)
1318{ 1325{
1319 struct o2net_node *nn = arg; 1326 struct o2net_node *nn =
1327 container_of(work, struct o2net_node, nn_connect_work.work);
1320 struct o2net_sock_container *sc = NULL; 1328 struct o2net_sock_container *sc = NULL;
1321 struct o2nm_node *node = NULL, *mynode = NULL; 1329 struct o2nm_node *node = NULL, *mynode = NULL;
1322 struct socket *sock = NULL; 1330 struct socket *sock = NULL;
1323 struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; 1331 struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
1324 int ret = 0; 1332 int ret = 0, stop;
1325 1333
1326 /* if we're greater we initiate tx, otherwise we accept */ 1334 /* if we're greater we initiate tx, otherwise we accept */
1327 if (o2nm_this_node() <= o2net_num_from_nn(nn)) 1335 if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg)
1342 1350
1343 spin_lock(&nn->nn_lock); 1351 spin_lock(&nn->nn_lock);
1344 /* see if we already have one pending or have given up */ 1352 /* see if we already have one pending or have given up */
1345 if (nn->nn_sc || nn->nn_persistent_error) 1353 stop = (nn->nn_sc || nn->nn_persistent_error);
1346 arg = NULL;
1347 spin_unlock(&nn->nn_lock); 1354 spin_unlock(&nn->nn_lock);
1348 if (arg == NULL) /* *shrug*, needed some indicator */ 1355 if (stop)
1349 goto out; 1356 goto out;
1350 1357
1351 nn->nn_last_connect_attempt = jiffies; 1358 nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@ out:
1421 return; 1428 return;
1422} 1429}
1423 1430
1424static void o2net_connect_expired(void *arg) 1431static void o2net_connect_expired(struct work_struct *work)
1425{ 1432{
1426 struct o2net_node *nn = arg; 1433 struct o2net_node *nn =
1434 container_of(work, struct o2net_node, nn_connect_expired.work);
1427 1435
1428 spin_lock(&nn->nn_lock); 1436 spin_lock(&nn->nn_lock);
1429 if (!nn->nn_sc_valid) { 1437 if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg)
1436 spin_unlock(&nn->nn_lock); 1444 spin_unlock(&nn->nn_lock);
1437} 1445}
1438 1446
1439static void o2net_still_up(void *arg) 1447static void o2net_still_up(struct work_struct *work)
1440{ 1448{
1441 struct o2net_node *nn = arg; 1449 struct o2net_node *nn =
1450 container_of(work, struct o2net_node, nn_still_up.work);
1442 1451
1443 o2quo_hb_still_up(o2net_num_from_nn(nn)); 1452 o2quo_hb_still_up(o2net_num_from_nn(nn));
1444} 1453}
@@ -1644,9 +1653,9 @@ out:
1644 return ret; 1653 return ret;
1645} 1654}
1646 1655
1647static void o2net_accept_many(void *arg) 1656static void o2net_accept_many(struct work_struct *work)
1648{ 1657{
1649 struct socket *sock = arg; 1658 struct socket *sock = o2net_listen_sock;
1650 while (o2net_accept_one(sock) == 0) 1659 while (o2net_accept_one(sock) == 0)
1651 cond_resched(); 1660 cond_resched();
1652} 1661}
@@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port)
1700 write_unlock_bh(&sock->sk->sk_callback_lock); 1709 write_unlock_bh(&sock->sk->sk_callback_lock);
1701 1710
1702 o2net_listen_sock = sock; 1711 o2net_listen_sock = sock;
1703 INIT_WORK(&o2net_listen_work, o2net_accept_many, sock); 1712 INIT_WORK(&o2net_listen_work, o2net_accept_many);
1704 1713
1705 sock->sk->sk_reuse = 1; 1714 sock->sk->sk_reuse = 1;
1706 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); 1715 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@ int o2net_init(void)
1819 struct o2net_node *nn = o2net_nn_from_num(i); 1828 struct o2net_node *nn = o2net_nn_from_num(i);
1820 1829
1821 spin_lock_init(&nn->nn_lock); 1830 spin_lock_init(&nn->nn_lock);
1822 INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn); 1831 INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
1823 INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn); 1832 INIT_DELAYED_WORK(&nn->nn_connect_expired,
1824 INIT_WORK(&nn->nn_still_up, o2net_still_up, nn); 1833 o2net_connect_expired);
1834 INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
1825 /* until we see hb from a node we'll return einval */ 1835 /* until we see hb from a node we'll return einval */
1826 nn->nn_persistent_error = -ENOTCONN; 1836 nn->nn_persistent_error = -ENOTCONN;
1827 init_waitqueue_head(&nn->nn_sc_wq); 1837 init_waitqueue_head(&nn->nn_sc_wq);
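The ocfs2/cluster conversions above all apply one mechanical change: the handler now receives the work_struct pointer itself and recovers its container with container_of(), and handlers behind a delayed_work reach through the embedded ".work" member. A minimal sketch of that pattern, with invented names rather than anything from this patch:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct foo_conn {                               /* hypothetical driver object */
        int                     id;
        struct work_struct      rx_work;        /* plain work item */
        struct delayed_work     keepalive_work; /* timer-backed work item */
};

/* old style was: static void foo_rx(void *arg) { struct foo_conn *c = arg; } */
static void foo_rx(struct work_struct *work)
{
        struct foo_conn *c = container_of(work, struct foo_conn, rx_work);

        printk(KERN_DEBUG "rx on conn %d\n", c->id);
}

/* a delayed handler is handed the inner work_struct, hence ".work" here */
static void foo_keepalive(struct work_struct *work)
{
        struct foo_conn *c = container_of(work, struct foo_conn,
                                          keepalive_work.work);

        printk(KERN_DEBUG "keepalive on conn %d\n", c->id);
}

static void foo_conn_init(struct foo_conn *c)
{
        INIT_WORK(&c->rx_work, foo_rx);
        INIT_DELAYED_WORK(&c->keepalive_work, foo_keepalive);
}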
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4b46aac7d243..daebbd3a2c8c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -86,18 +86,18 @@ struct o2net_node {
86 * connect attempt fails and so can be self-arming. shutdown is 86 * connect attempt fails and so can be self-arming. shutdown is
87 * careful to first mark the nn such that no connects will be attempted 87 * careful to first mark the nn such that no connects will be attempted
88 * before canceling delayed connect work and flushing the queue. */ 88 * before canceling delayed connect work and flushing the queue. */
89 struct work_struct nn_connect_work; 89 struct delayed_work nn_connect_work;
90 unsigned long nn_last_connect_attempt; 90 unsigned long nn_last_connect_attempt;
91 91
92 /* this is queued as nodes come up and is canceled when a connection is 92 /* this is queued as nodes come up and is canceled when a connection is
93 * established. this expiring gives up on the node and errors out 93 * established. this expiring gives up on the node and errors out
94 * transmits */ 94 * transmits */
95 struct work_struct nn_connect_expired; 95 struct delayed_work nn_connect_expired;
96 96
97 /* after we give up on a socket we wait a while before deciding 97 /* after we give up on a socket we wait a while before deciding
98 * that it is still heartbeating and that we should do some 98 * that it is still heartbeating and that we should do some
99 * quorum work */ 99 * quorum work */
100 struct work_struct nn_still_up; 100 struct delayed_work nn_still_up;
101}; 101};
102 102
103struct o2net_sock_container { 103struct o2net_sock_container {
@@ -129,7 +129,7 @@ struct o2net_sock_container {
129 struct work_struct sc_shutdown_work; 129 struct work_struct sc_shutdown_work;
130 130
131 struct timer_list sc_idle_timeout; 131 struct timer_list sc_idle_timeout;
132 struct work_struct sc_keepalive_work; 132 struct delayed_work sc_keepalive_work;
133 133
134 unsigned sc_handshake_ok:1; 134 unsigned sc_handshake_ok:1;
135 135
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index fa968180b072..6b6ff76538c5 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned
153 * called functions that cannot be directly called from the 153 * called functions that cannot be directly called from the
154 * net message handlers for some reason, usually because 154 * net message handlers for some reason, usually because
155 * they need to send net messages of their own. */ 155 * they need to send net messages of their own. */
156void dlm_dispatch_work(void *data); 156void dlm_dispatch_work(struct work_struct *work);
157 157
158struct dlm_lock_resource; 158struct dlm_lock_resource;
159struct dlm_work_item; 159struct dlm_work_item;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 8d1065f8b3bd..637646e6922e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1296,7 +1296,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1296 1296
1297 spin_lock_init(&dlm->work_lock); 1297 spin_lock_init(&dlm->work_lock);
1298 INIT_LIST_HEAD(&dlm->work_list); 1298 INIT_LIST_HEAD(&dlm->work_list);
1299 INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); 1299 INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
1300 1300
1301 kref_init(&dlm->dlm_refs); 1301 kref_init(&dlm->dlm_refs);
1302 dlm->dlm_state = DLM_CTXT_NEW; 1302 dlm->dlm_state = DLM_CTXT_NEW;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9d950d7cea38..fb3e2b0817f1 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
153} 153}
154 154
155/* Worker function used during recovery. */ 155/* Worker function used during recovery. */
156void dlm_dispatch_work(void *data) 156void dlm_dispatch_work(struct work_struct *work)
157{ 157{
158 struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; 158 struct dlm_ctxt *dlm =
159 container_of(work, struct dlm_ctxt, dispatched_work);
159 LIST_HEAD(tmp_list); 160 LIST_HEAD(tmp_list);
160 struct list_head *iter, *iter2; 161 struct list_head *iter, *iter2;
161 struct dlm_work_item *item; 162 struct dlm_work_item *item;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index eead48bbfac6..7d2f578b267d 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
171 BUG(); 171 BUG();
172} 172}
173 173
174static void user_dlm_unblock_lock(void *opaque); 174static void user_dlm_unblock_lock(struct work_struct *work);
175 175
176static void __user_dlm_queue_lockres(struct user_lock_res *lockres) 176static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
177{ 177{
178 if (!(lockres->l_flags & USER_LOCK_QUEUED)) { 178 if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
179 user_dlm_grab_inode_ref(lockres); 179 user_dlm_grab_inode_ref(lockres);
180 180
181 INIT_WORK(&lockres->l_work, user_dlm_unblock_lock, 181 INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
182 lockres);
183 182
184 queue_work(user_dlm_worker, &lockres->l_work); 183 queue_work(user_dlm_worker, &lockres->l_work);
185 lockres->l_flags |= USER_LOCK_QUEUED; 184 lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
279 iput(inode); 278 iput(inode);
280} 279}
281 280
282static void user_dlm_unblock_lock(void *opaque) 281static void user_dlm_unblock_lock(struct work_struct *work)
283{ 282{
284 int new_level, status; 283 int new_level, status;
285 struct user_lock_res *lockres = (struct user_lock_res *) opaque; 284 struct user_lock_res *lockres =
285 container_of(work, struct user_lock_res, l_work);
286 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); 286 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
287 287
288 mlog(0, "processing lockres %.*s\n", lockres->l_namelen, 288 mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index fd9734def551..d95ee2720e6e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -911,11 +911,12 @@ struct ocfs2_la_recovery_item {
911 * NOTE: This function can and will sleep on recovery of other nodes 911 * NOTE: This function can and will sleep on recovery of other nodes
912 * during cluster locking, just like any other ocfs2 process. 912 * during cluster locking, just like any other ocfs2 process.
913 */ 913 */
914void ocfs2_complete_recovery(void *data) 914void ocfs2_complete_recovery(struct work_struct *work)
915{ 915{
916 int ret; 916 int ret;
917 struct ocfs2_super *osb = data; 917 struct ocfs2_journal *journal =
918 struct ocfs2_journal *journal = osb->journal; 918 container_of(work, struct ocfs2_journal, j_recovery_work);
919 struct ocfs2_super *osb = journal->j_osb;
919 struct ocfs2_dinode *la_dinode, *tl_dinode; 920 struct ocfs2_dinode *la_dinode, *tl_dinode;
920 struct ocfs2_la_recovery_item *item; 921 struct ocfs2_la_recovery_item *item;
921 struct list_head *p, *n; 922 struct list_head *p, *n;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 2f3a6acdac45..5be161a4ad9f 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -172,7 +172,7 @@ static inline void ocfs2_handle_set_sync(struct ocfs2_journal_handle *handle, in
172} 172}
173 173
174/* Exported only for the journal struct init code in super.c. Do not call. */ 174/* Exported only for the journal struct init code in super.c. Do not call. */
175void ocfs2_complete_recovery(void *data); 175void ocfs2_complete_recovery(struct work_struct *work);
176 176
177/* 177/*
178 * Journal Control: 178 * Journal Control:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 0462a7f4e21b..9b1bad1d48ec 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -283,7 +283,7 @@ struct ocfs2_super
283 /* Truncate log info */ 283 /* Truncate log info */
284 struct inode *osb_tl_inode; 284 struct inode *osb_tl_inode;
285 struct buffer_head *osb_tl_bh; 285 struct buffer_head *osb_tl_bh;
286 struct work_struct osb_truncate_log_wq; 286 struct delayed_work osb_truncate_log_wq;
287 287
288 struct ocfs2_node_map osb_recovering_orphan_dirs; 288 struct ocfs2_node_map osb_recovering_orphan_dirs;
289 unsigned int *osb_orphan_wipes; 289 unsigned int *osb_orphan_wipes;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 76b46ebbb10c..9a8089030f55 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1365 spin_lock_init(&journal->j_lock); 1365 spin_lock_init(&journal->j_lock);
1366 journal->j_trans_id = (unsigned long) 1; 1366 journal->j_trans_id = (unsigned long) 1;
1367 INIT_LIST_HEAD(&journal->j_la_cleanups); 1367 INIT_LIST_HEAD(&journal->j_la_cleanups);
1368 INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb); 1368 INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
1369 journal->j_state = OCFS2_JOURNAL_FREE; 1369 journal->j_state = OCFS2_JOURNAL_FREE;
1370 1370
1371 /* get some pseudo constants for clustersize bits */ 1371 /* get some pseudo constants for clustersize bits */
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ac93174c9639..7280a23ef344 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super,
104 struct reiserfs_journal *journal); 104 struct reiserfs_journal *journal);
105static int dirty_one_transaction(struct super_block *s, 105static int dirty_one_transaction(struct super_block *s,
106 struct reiserfs_journal_list *jl); 106 struct reiserfs_journal_list *jl);
107static void flush_async_commits(void *p); 107static void flush_async_commits(struct work_struct *work);
108static void queue_log_writer(struct super_block *s); 108static void queue_log_writer(struct super_block *s);
109 109
110/* values for join in do_journal_begin_r */ 110/* values for join in do_journal_begin_r */
@@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2836 if (reiserfs_mounted_fs_count <= 1) 2836 if (reiserfs_mounted_fs_count <= 1)
2837 commit_wq = create_workqueue("reiserfs"); 2837 commit_wq = create_workqueue("reiserfs");
2838 2838
2839 INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb); 2839 INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2840 journal->j_work_sb = p_s_sb;
2840 return 0; 2841 return 0;
2841 free_and_return: 2842 free_and_return:
2842 free_journal_ram(p_s_sb); 2843 free_journal_ram(p_s_sb);
@@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th,
3447/* 3448/*
3448** writeback the pending async commits to disk 3449** writeback the pending async commits to disk
3449*/ 3450*/
3450static void flush_async_commits(void *p) 3451static void flush_async_commits(struct work_struct *work)
3451{ 3452{
3452 struct super_block *p_s_sb = p; 3453 struct reiserfs_journal *journal =
3453 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3454 container_of(work, struct reiserfs_journal, j_work.work);
3455 struct super_block *p_s_sb = journal->j_work_sb;
3454 struct reiserfs_journal_list *jl; 3456 struct reiserfs_journal_list *jl;
3455 struct list_head *entry; 3457 struct list_head *entry;
3456 3458
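Where the containing structure did not already hold everything the handler needs, the conversion grows a small side field next to the work item — reiserfs stores the super_block in j_work_sb above. A hedged sketch of that workaround, with made-up names:

#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/fs.h>

struct foo_journal {                            /* hypothetical */
        struct delayed_work     j_work;
        struct super_block      *j_work_sb;     /* context that used to travel in
                                                 * INIT_WORK()'s data argument */
};

static void foo_flush_commits(struct work_struct *work)
{
        struct foo_journal *journal = container_of(work, struct foo_journal,
                                                   j_work.work);
        struct super_block *sb = journal->j_work_sb;

        printk(KERN_DEBUG "flushing commits for %s\n", sb->s_id);
}

static void foo_journal_init(struct foo_journal *journal, struct super_block *sb)
{
        INIT_DELAYED_WORK(&journal->j_work, foo_flush_commits);
        journal->j_work_sb = sb;
}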
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 09360cf1e1f2..8e6b56fc1cad 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -149,9 +149,10 @@ xfs_destroy_ioend(
149 */ 149 */
150STATIC void 150STATIC void
151xfs_end_bio_delalloc( 151xfs_end_bio_delalloc(
152 void *data) 152 struct work_struct *work)
153{ 153{
154 xfs_ioend_t *ioend = data; 154 xfs_ioend_t *ioend =
155 container_of(work, xfs_ioend_t, io_work);
155 156
156 xfs_destroy_ioend(ioend); 157 xfs_destroy_ioend(ioend);
157} 158}
@@ -161,9 +162,10 @@ xfs_end_bio_delalloc(
161 */ 162 */
162STATIC void 163STATIC void
163xfs_end_bio_written( 164xfs_end_bio_written(
164 void *data) 165 struct work_struct *work)
165{ 166{
166 xfs_ioend_t *ioend = data; 167 xfs_ioend_t *ioend =
168 container_of(work, xfs_ioend_t, io_work);
167 169
168 xfs_destroy_ioend(ioend); 170 xfs_destroy_ioend(ioend);
169} 171}
@@ -176,9 +178,10 @@ xfs_end_bio_written(
176 */ 178 */
177STATIC void 179STATIC void
178xfs_end_bio_unwritten( 180xfs_end_bio_unwritten(
179 void *data) 181 struct work_struct *work)
180{ 182{
181 xfs_ioend_t *ioend = data; 183 xfs_ioend_t *ioend =
184 container_of(work, xfs_ioend_t, io_work);
182 bhv_vnode_t *vp = ioend->io_vnode; 185 bhv_vnode_t *vp = ioend->io_vnode;
183 xfs_off_t offset = ioend->io_offset; 186 xfs_off_t offset = ioend->io_offset;
184 size_t size = ioend->io_size; 187 size_t size = ioend->io_size;
@@ -220,11 +223,11 @@ xfs_alloc_ioend(
220 ioend->io_size = 0; 223 ioend->io_size = 0;
221 224
222 if (type == IOMAP_UNWRITTEN) 225 if (type == IOMAP_UNWRITTEN)
223 INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); 226 INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
224 else if (type == IOMAP_DELAY) 227 else if (type == IOMAP_DELAY)
225 INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend); 228 INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
226 else 229 else
227 INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend); 230 INIT_WORK(&ioend->io_work, xfs_end_bio_written);
228 231
229 return ioend; 232 return ioend;
230} 233}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d3382843698e..eef4a0ba11e9 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -994,9 +994,10 @@ xfs_buf_wait_unpin(
994 994
995STATIC void 995STATIC void
996xfs_buf_iodone_work( 996xfs_buf_iodone_work(
997 void *v) 997 struct work_struct *work)
998{ 998{
999 xfs_buf_t *bp = (xfs_buf_t *)v; 999 xfs_buf_t *bp =
1000 container_of(work, xfs_buf_t, b_iodone_work);
1000 1001
1001 if (bp->b_iodone) 1002 if (bp->b_iodone)
1002 (*(bp->b_iodone))(bp); 1003 (*(bp->b_iodone))(bp);
@@ -1017,10 +1018,10 @@ xfs_buf_ioend(
1017 1018
1018 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { 1019 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1019 if (schedule) { 1020 if (schedule) {
1020 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); 1021 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1021 queue_work(xfslogd_workqueue, &bp->b_iodone_work); 1022 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1022 } else { 1023 } else {
1023 xfs_buf_iodone_work(bp); 1024 xfs_buf_iodone_work(&bp->b_iodone_work);
1024 } 1025 }
1025 } else { 1026 } else {
1026 up(&bp->b_iodonesema); 1027 up(&bp->b_iodonesema);
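The xfs_buf.c hunk above also shows the synchronous fallback: when the handler is invoked directly instead of being queued, it must now be passed the address of the embedded work_struct rather than the containing buffer. A rough equivalent with hypothetical names:

#include <linux/workqueue.h>

struct foo_buf {                                /* hypothetical buffer object */
        struct work_struct      b_iodone_work;
        void                    (*b_iodone)(struct foo_buf *);
};

static void foo_buf_iodone_work(struct work_struct *work)
{
        struct foo_buf *bp = container_of(work, struct foo_buf, b_iodone_work);

        if (bp->b_iodone)
                bp->b_iodone(bp);
}

static void foo_buf_ioend(struct foo_buf *bp, int defer)
{
        if (defer) {
                INIT_WORK(&bp->b_iodone_work, foo_buf_iodone_work);
                schedule_work(&bp->b_iodone_work);
        } else {
                /* direct call takes the work_struct, same as the queued path */
                foo_buf_iodone_work(&bp->b_iodone_work);
        }
}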
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 0d71c0041f13..9e350fd44d77 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -194,7 +194,7 @@ struct kioctx {
194 194
195 struct aio_ring_info ring_info; 195 struct aio_ring_info ring_info;
196 196
197 struct work_struct wq; 197 struct delayed_work wq;
198}; 198};
199 199
200/* prototypes */ 200/* prototypes */
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 4c02119c6ab9..3ea1cd58de97 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -133,7 +133,7 @@ struct cn_callback_data {
133struct cn_callback_entry { 133struct cn_callback_entry {
134 struct list_head callback_entry; 134 struct list_head callback_entry;
135 struct cn_callback *cb; 135 struct cn_callback *cb;
136 struct work_struct work; 136 struct delayed_work work;
137 struct cn_queue_dev *pdev; 137 struct cn_queue_dev *pdev;
138 138
139 struct cn_callback_id id; 139 struct cn_callback_id id;
@@ -170,7 +170,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev);
170 170
171int cn_cb_equal(struct cb_id *, struct cb_id *); 171int cn_cb_equal(struct cb_id *, struct cb_id *);
172 172
173void cn_queue_wrapper(void *data); 173void cn_queue_wrapper(struct work_struct *work);
174 174
175extern int cn_already_initialized; 175extern int cn_already_initialized;
176 176
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index c115e9e840b4..1fb02e17f6f6 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -461,7 +461,7 @@ struct i2o_driver {
461 int (*reply) (struct i2o_controller *, u32, struct i2o_message *); 461 int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
462 462
463 /* Event handler */ 463 /* Event handler */
464 void (*event) (struct i2o_event *); 464 work_func_t event;
465 465
466 struct workqueue_struct *event_queue; /* Event queue */ 466 struct workqueue_struct *event_queue; /* Event queue */
467 467
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index efe0ee4cc80b..06c58c423fe1 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
158 if (t->buf.tail != NULL) 158 if (t->buf.tail != NULL)
159 t->buf.tail->commit = t->buf.tail->used; 159 t->buf.tail->commit = t->buf.tail->used;
160 spin_unlock_irqrestore(&t->buf.lock, flags); 160 spin_unlock_irqrestore(&t->buf.lock, flags);
161 schedule_work(&t->buf.work); 161 schedule_delayed_work(&t->buf.work, 0);
162} 162}
163 163
164#endif 164#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index abd2debebca2..b3f32eadbef5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -568,8 +568,9 @@ struct ata_port {
568 struct ata_host *host; 568 struct ata_host *host;
569 struct device *dev; 569 struct device *dev;
570 570
571 struct work_struct port_task; 571 void *port_task_data;
572 struct work_struct hotplug_task; 572 struct delayed_work port_task;
573 struct delayed_work hotplug_task;
573 struct work_struct scsi_rescan_task; 574 struct work_struct scsi_rescan_task;
574 575
575 unsigned int hsm_task_state; 576 unsigned int hsm_task_state;
@@ -747,7 +748,7 @@ extern int ata_ratelimit(void);
747extern unsigned int ata_busy_sleep(struct ata_port *ap, 748extern unsigned int ata_busy_sleep(struct ata_port *ap,
748 unsigned long timeout_pat, 749 unsigned long timeout_pat,
749 unsigned long timeout); 750 unsigned long timeout);
750extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), 751extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
751 void *data, unsigned long delay); 752 void *data, unsigned long delay);
752extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 753extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
753 unsigned long interval_msec, 754 unsigned long interval_msec,
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 528e7d3fecb1..c15ae1986b98 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -110,7 +110,7 @@ struct mmc_host {
110 struct mmc_card *card_busy; /* the MMC card claiming host */ 110 struct mmc_card *card_busy; /* the MMC card claiming host */
111 struct mmc_card *card_selected; /* the selected MMC card */ 111 struct mmc_card *card_selected; /* the selected MMC card */
112 112
113 struct work_struct detect; 113 struct delayed_work detect;
114 114
115 unsigned long private[0] ____cacheline_aligned; 115 unsigned long private[0] ____cacheline_aligned;
116}; 116};
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index b089d9506283..a503052138bd 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -127,10 +127,10 @@ struct ncp_server {
127 } unexpected_packet; 127 } unexpected_packet;
128}; 128};
129 129
130extern void ncp_tcp_rcv_proc(void *server); 130extern void ncp_tcp_rcv_proc(struct work_struct *work);
131extern void ncp_tcp_tx_proc(void *server); 131extern void ncp_tcp_tx_proc(struct work_struct *work);
132extern void ncpdgram_rcv_proc(void *server); 132extern void ncpdgram_rcv_proc(struct work_struct *work);
133extern void ncpdgram_timeout_proc(void *server); 133extern void ncpdgram_timeout_proc(struct work_struct *work);
134extern void ncpdgram_timeout_call(unsigned long server); 134extern void ncpdgram_timeout_call(unsigned long server);
135extern void ncp_tcp_data_ready(struct sock* sk, int len); 135extern void ncp_tcp_data_ready(struct sock* sk, int len);
136extern void ncp_tcp_write_space(struct sock* sk); 136extern void ncp_tcp_write_space(struct sock* sk);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7ccfc7ef0a83..95796e6924f1 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -51,7 +51,7 @@ struct nfs_client {
51 51
52 unsigned long cl_lease_time; 52 unsigned long cl_lease_time;
53 unsigned long cl_last_renewal; 53 unsigned long cl_last_renewal;
54 struct work_struct cl_renewd; 54 struct delayed_work cl_renewd;
55 55
56 struct rpc_wait_queue cl_rpcwaitq; 56 struct rpc_wait_queue cl_rpcwaitq;
57 57
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 62a7169aed8b..3a28742d86f9 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -249,7 +249,8 @@ struct reiserfs_journal {
249 int j_errno; 249 int j_errno;
250 250
251 /* when flushing ordered buffers, throttle new ordered writers */ 251 /* when flushing ordered buffers, throttle new ordered writers */
252 struct work_struct j_work; 252 struct delayed_work j_work;
253 struct super_block *j_work_sb;
253 atomic_t j_async_throttle; 254 atomic_t j_async_throttle;
254}; 255};
255 256
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 24accb483849..0e3d91b76996 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -38,7 +38,7 @@ struct rchan_buf
38 size_t subbufs_consumed; /* count of sub-buffers consumed */ 38 size_t subbufs_consumed; /* count of sub-buffers consumed */
39 struct rchan *chan; /* associated channel */ 39 struct rchan *chan; /* associated channel */
40 wait_queue_head_t read_wait; /* reader wait queue */ 40 wait_queue_head_t read_wait; /* reader wait queue */
41 struct work_struct wake_readers; /* reader wake-up work struct */ 41 struct delayed_work wake_readers; /* reader wake-up work struct */
42 struct dentry *dentry; /* channel file dentry */ 42 struct dentry *dentry; /* channel file dentry */
43 struct kref kref; /* channel buffer refcount */ 43 struct kref kref; /* channel buffer refcount */
44 struct page **page_array; /* array of current buffer pages */ 44 struct page **page_array; /* array of current buffer pages */
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index a2eb9b4a9de3..4a68125b6de6 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -30,7 +30,7 @@ struct rpc_inode {
30#define RPC_PIPE_WAIT_FOR_OPEN 1 30#define RPC_PIPE_WAIT_FOR_OPEN 1
31 int flags; 31 int flags;
32 struct rpc_pipe_ops *ops; 32 struct rpc_pipe_ops *ops;
33 struct work_struct queue_timeout; 33 struct delayed_work queue_timeout;
34}; 34};
35 35
36static inline struct rpc_inode * 36static inline struct rpc_inode *
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 60394fbc4c70..3e04c1512fc4 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -177,7 +177,7 @@ struct rpc_xprt {
177 unsigned long connect_timeout, 177 unsigned long connect_timeout,
178 bind_timeout, 178 bind_timeout,
179 reestablish_timeout; 179 reestablish_timeout;
180 struct work_struct connect_worker; 180 struct delayed_work connect_worker;
181 unsigned short port; 181 unsigned short port;
182 182
183 /* 183 /*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 65321f911c1e..f717f0898238 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -53,7 +53,7 @@ struct tty_buffer {
53}; 53};
54 54
55struct tty_bufhead { 55struct tty_bufhead {
56 struct work_struct work; 56 struct delayed_work work;
57 struct semaphore pty_sem; 57 struct semaphore pty_sem;
58 spinlock_t lock; 58 spinlock_t lock;
59 struct tty_buffer *head; /* Queue head */ 59 struct tty_buffer *head; /* Queue head */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 0cd73edeef13..aab5b1b72021 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -388,7 +388,7 @@ struct usb_device {
388 388
389 int pm_usage_cnt; /* usage counter for autosuspend */ 389 int pm_usage_cnt; /* usage counter for autosuspend */
390#ifdef CONFIG_PM 390#ifdef CONFIG_PM
391 struct work_struct autosuspend; /* for delayed autosuspends */ 391 struct delayed_work autosuspend; /* for delayed autosuspends */
392 struct mutex pm_mutex; /* protects PM operations */ 392 struct mutex pm_mutex; /* protects PM operations */
393 393
394 unsigned auto_pm:1; /* autosuspend/resume in progress */ 394 unsigned auto_pm:1; /* autosuspend/resume in progress */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9bca3539a1e5..4a3ea83c6d16 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,12 +11,23 @@
11 11
12struct workqueue_struct; 12struct workqueue_struct;
13 13
14struct work_struct;
15typedef void (*work_func_t)(struct work_struct *work);
16
14struct work_struct { 17struct work_struct {
15 unsigned long pending; 18 /* the first word is the work queue pointer and the flags rolled into
19 * one */
20 unsigned long management;
21#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
22#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */
23#define WORK_STRUCT_FLAG_MASK (3UL)
24#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
16 struct list_head entry; 25 struct list_head entry;
17 void (*func)(void *); 26 work_func_t func;
18 void *data; 27};
19 void *wq_data; 28
29struct delayed_work {
30 struct work_struct work;
20 struct timer_list timer; 31 struct timer_list timer;
21}; 32};
22 33
@@ -24,36 +35,117 @@ struct execute_work {
24 struct work_struct work; 35 struct work_struct work;
25}; 36};
26 37
27#define __WORK_INITIALIZER(n, f, d) { \ 38#define __WORK_INITIALIZER(n, f) { \
39 .management = 0, \
40 .entry = { &(n).entry, &(n).entry }, \
41 .func = (f), \
42 }
43
44#define __WORK_INITIALIZER_NAR(n, f) { \
45 .management = (1 << WORK_STRUCT_NOAUTOREL), \
28 .entry = { &(n).entry, &(n).entry }, \ 46 .entry = { &(n).entry, &(n).entry }, \
29 .func = (f), \ 47 .func = (f), \
30 .data = (d), \ 48 }
49
50#define __DELAYED_WORK_INITIALIZER(n, f) { \
51 .work = __WORK_INITIALIZER((n).work, (f)), \
52 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
53 }
54
55#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \
56 .work = __WORK_INITIALIZER_NAR((n).work, (f)), \
31 .timer = TIMER_INITIALIZER(NULL, 0, 0), \ 57 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
32 } 58 }
33 59
34#define DECLARE_WORK(n, f, d) \ 60#define DECLARE_WORK(n, f) \
35 struct work_struct n = __WORK_INITIALIZER(n, f, d) 61 struct work_struct n = __WORK_INITIALIZER(n, f)
62
63#define DECLARE_WORK_NAR(n, f) \
64 struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
65
66#define DECLARE_DELAYED_WORK(n, f) \
67 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
68
69#define DECLARE_DELAYED_WORK_NAR(n, f) \
 70 struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
36 71
37/* 72/*
38 * initialize a work-struct's func and data pointers: 73 * initialize a work item's function pointer
39 */ 74 */
40#define PREPARE_WORK(_work, _func, _data) \ 75#define PREPARE_WORK(_work, _func) \
41 do { \ 76 do { \
42 (_work)->func = _func; \ 77 (_work)->func = (_func); \
43 (_work)->data = _data; \
44 } while (0) 78 } while (0)
45 79
80#define PREPARE_DELAYED_WORK(_work, _func) \
81 PREPARE_WORK(&(_work)->work, (_func))
82
46/* 83/*
47 * initialize all of a work-struct: 84 * initialize all of a work item in one go
48 */ 85 */
49#define INIT_WORK(_work, _func, _data) \ 86#define INIT_WORK(_work, _func) \
50 do { \ 87 do { \
88 (_work)->management = 0; \
51 INIT_LIST_HEAD(&(_work)->entry); \ 89 INIT_LIST_HEAD(&(_work)->entry); \
52 (_work)->pending = 0; \ 90 PREPARE_WORK((_work), (_func)); \
53 PREPARE_WORK((_work), (_func), (_data)); \ 91 } while (0)
92
93#define INIT_WORK_NAR(_work, _func) \
94 do { \
95 (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \
96 INIT_LIST_HEAD(&(_work)->entry); \
97 PREPARE_WORK((_work), (_func)); \
98 } while (0)
99
100#define INIT_DELAYED_WORK(_work, _func) \
101 do { \
102 INIT_WORK(&(_work)->work, (_func)); \
103 init_timer(&(_work)->timer); \
104 } while (0)
105
106#define INIT_DELAYED_WORK_NAR(_work, _func) \
107 do { \
108 INIT_WORK_NAR(&(_work)->work, (_func)); \
54 init_timer(&(_work)->timer); \ 109 init_timer(&(_work)->timer); \
55 } while (0) 110 } while (0)
56 111
112/**
113 * work_pending - Find out whether a work item is currently pending
114 * @work: The work item in question
115 */
116#define work_pending(work) \
117 test_bit(WORK_STRUCT_PENDING, &(work)->management)
118
119/**
120 * delayed_work_pending - Find out whether a delayable work item is currently
121 * pending
122 * @work: The work item in question
123 */
124#define delayed_work_pending(work) \
125 test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
126
127/**
128 * work_release - Release a work item under execution
129 * @work: The work item to release
130 *
131 * This is used to release a work item that has been initialised with automatic
132 * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
133 * function the opportunity to grab auxiliary data from the container of the
134 * work_struct before clearing the pending bit as the work_struct may be
135 * subject to deallocation the moment the pending bit is cleared.
136 *
137 * In such a case, this should be called in the work function after it has
138 * fetched any data it may require from the containter of the work_struct.
139 * After this function has been called, the work_struct may be scheduled for
140 * further execution or it may be deallocated unless other precautions are
141 * taken.
142 *
143 * This should also be used to release a delayed work item.
144 */
145#define work_release(work) \
146 clear_bit(WORK_STRUCT_PENDING, &(work)->management)
147
148
57extern struct workqueue_struct *__create_workqueue(const char *name, 149extern struct workqueue_struct *__create_workqueue(const char *name,
58 int singlethread); 150 int singlethread);
59#define create_workqueue(name) __create_workqueue((name), 0) 151#define create_workqueue(name) __create_workqueue((name), 0)
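The _NAR variants and work_release() introduced above exist for work items whose container may be requeued or freed as soon as the pending bit clears: the handler copies out what it needs and only then releases the item. A sketch of that usage, assuming an owner that polls work_pending() before recycling the object (names invented, not from this patch):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct foo_req {                                /* hypothetical request */
        struct work_struct      work;
        int                     result;
};

static void foo_req_complete(struct work_struct *work)
{
        struct foo_req *req = container_of(work, struct foo_req, work);
        int result = req->result;       /* copy out what we need first ...     */

        work_release(work);             /* ... then clear the pending bit; the
                                         * owner may now free or requeue 'req' */
        printk(KERN_INFO "foo: request finished, result %d\n", result);
}

static void foo_req_submit(struct foo_req *req, int result)
{
        req->result = result;
        INIT_WORK_NAR(&req->work, foo_req_complete);
        schedule_work(&req->work);
        /* the owner can later wait for !work_pending(&req->work) */
}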
@@ -62,39 +154,38 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
62extern void destroy_workqueue(struct workqueue_struct *wq); 154extern void destroy_workqueue(struct workqueue_struct *wq);
63 155
64extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); 156extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
65extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)); 157extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
66extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 158extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
67 struct work_struct *work, unsigned long delay); 159 struct delayed_work *work, unsigned long delay);
68extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); 160extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
69 161
70extern int FASTCALL(schedule_work(struct work_struct *work)); 162extern int FASTCALL(schedule_work(struct work_struct *work));
71extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay)); 163extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
72 164
73extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay); 165extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
74extern int schedule_on_each_cpu(void (*func)(void *info), void *info); 166extern int schedule_on_each_cpu(work_func_t func);
75extern void flush_scheduled_work(void); 167extern void flush_scheduled_work(void);
76extern int current_is_keventd(void); 168extern int current_is_keventd(void);
77extern int keventd_up(void); 169extern int keventd_up(void);
78 170
79extern void init_workqueues(void); 171extern void init_workqueues(void);
80void cancel_rearming_delayed_work(struct work_struct *work); 172void cancel_rearming_delayed_work(struct delayed_work *work);
81void cancel_rearming_delayed_workqueue(struct workqueue_struct *, 173void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
82 struct work_struct *); 174 struct delayed_work *);
83int execute_in_process_context(void (*fn)(void *), void *, 175int execute_in_process_context(work_func_t fn, struct execute_work *);
84 struct execute_work *);
85 176
86/* 177/*
87 * Kill off a pending schedule_delayed_work(). Note that the work callback 178 * Kill off a pending schedule_delayed_work(). Note that the work callback
88 * function may still be running on return from cancel_delayed_work(). Run 179 * function may still be running on return from cancel_delayed_work(). Run
89 * flush_scheduled_work() to wait on it. 180 * flush_scheduled_work() to wait on it.
90 */ 181 */
91static inline int cancel_delayed_work(struct work_struct *work) 182static inline int cancel_delayed_work(struct delayed_work *work)
92{ 183{
93 int ret; 184 int ret;
94 185
95 ret = del_timer_sync(&work->timer); 186 ret = del_timer_sync(&work->timer);
96 if (ret) 187 if (ret)
97 clear_bit(0, &work->pending); 188 clear_bit(WORK_STRUCT_PENDING, &work->work.management);
98 return ret; 189 return ret;
99} 190}
100 191
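With the prototypes above now split between work_struct and delayed_work, a self-arming poller ends up looking roughly like this (hypothetical example, not part of the patch); queue_delayed_work() with a zero delay simply degrades to queue_work(), as the kernel/workqueue.c hunk further down shows:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static void foo_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(foo_poll_work, foo_poll);

static void foo_poll(struct work_struct *work)
{
        /* ... sample the hardware ... */
        schedule_delayed_work(&foo_poll_work, HZ);      /* re-arm in one second */
}

static int foo_poll_start(void)
{
        return schedule_delayed_work(&foo_poll_work, HZ) ? 0 : -EBUSY;
}

static void foo_poll_stop(void)
{
        /* cancels and keeps flushing until the self-arming work stays gone */
        cancel_rearming_delayed_work(&foo_poll_work);
}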
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index 617b672b1132..89119277553d 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -108,8 +108,8 @@ struct ieee80211softmac_assoc_info {
108 /* Scan retries remaining */ 108 /* Scan retries remaining */
109 int scan_retry; 109 int scan_retry;
110 110
111 struct work_struct work; 111 struct delayed_work work;
112 struct work_struct timeout; 112 struct delayed_work timeout;
113}; 113};
114 114
115struct ieee80211softmac_bss_info { 115struct ieee80211softmac_bss_info {
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 5f48748fe017..f7be1ac73601 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -84,7 +84,7 @@ struct inet_timewait_death_row {
84}; 84};
85 85
86extern void inet_twdr_hangman(unsigned long data); 86extern void inet_twdr_hangman(unsigned long data);
87extern void inet_twdr_twkill_work(void *data); 87extern void inet_twdr_twkill_work(struct work_struct *work);
88extern void inet_twdr_twcal_tick(unsigned long data); 88extern void inet_twdr_twcal_tick(unsigned long data);
89 89
90#if (BITS_PER_LONG == 64) 90#if (BITS_PER_LONG == 64)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f8cbe40f52c0..c089f93ba591 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1030,7 +1030,7 @@ void sctp_inq_init(struct sctp_inq *);
1030void sctp_inq_free(struct sctp_inq *); 1030void sctp_inq_free(struct sctp_inq *);
1031void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); 1031void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
1032struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); 1032struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
1033void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *); 1033void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
1034 1034
1035/* This is the structure we use to hold outbound chunks. You push 1035/* This is the structure we use to hold outbound chunks. You push
1036 * chunks in and they automatically pop out the other end as bundled 1036 * chunks in and they automatically pop out the other end as bundled
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 1d77b63c5ea4..1f989fb42c70 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -201,9 +201,14 @@ struct domain_device {
201 void *lldd_dev; 201 void *lldd_dev;
202}; 202};
203 203
204struct sas_discovery_event {
205 struct work_struct work;
206 struct asd_sas_port *port;
207};
208
204struct sas_discovery { 209struct sas_discovery {
205 spinlock_t disc_event_lock; 210 spinlock_t disc_event_lock;
206 struct work_struct disc_work[DISC_NUM_EVENTS]; 211 struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
207 unsigned long pending; 212 unsigned long pending;
208 u8 fanout_sas_addr[8]; 213 u8 fanout_sas_addr[8];
209 u8 eeds_a[8]; 214 u8 eeds_a[8];
@@ -249,14 +254,19 @@ struct asd_sas_port {
249 void *lldd_port; /* not touched by the sas class code */ 254 void *lldd_port; /* not touched by the sas class code */
250}; 255};
251 256
257struct asd_sas_event {
258 struct work_struct work;
259 struct asd_sas_phy *phy;
260};
261
252/* The phy pretty much is controlled by the LLDD. 262/* The phy pretty much is controlled by the LLDD.
253 * The class only reads those fields. 263 * The class only reads those fields.
254 */ 264 */
255struct asd_sas_phy { 265struct asd_sas_phy {
256/* private: */ 266/* private: */
257 /* protected by ha->event_lock */ 267 /* protected by ha->event_lock */
258 struct work_struct port_events[PORT_NUM_EVENTS]; 268 struct asd_sas_event port_events[PORT_NUM_EVENTS];
259 struct work_struct phy_events[PHY_NUM_EVENTS]; 269 struct asd_sas_event phy_events[PHY_NUM_EVENTS];
260 270
261 unsigned long port_events_pending; 271 unsigned long port_events_pending;
262 unsigned long phy_events_pending; 272 unsigned long phy_events_pending;
@@ -308,10 +318,15 @@ struct scsi_core {
308 int queue_thread_kill; 318 int queue_thread_kill;
309}; 319};
310 320
321struct sas_ha_event {
322 struct work_struct work;
323 struct sas_ha_struct *ha;
324};
325
311struct sas_ha_struct { 326struct sas_ha_struct {
312/* private: */ 327/* private: */
313 spinlock_t event_lock; 328 spinlock_t event_lock;
314 struct work_struct ha_events[HA_NUM_EVENTS]; 329 struct sas_ha_event ha_events[HA_NUM_EVENTS];
315 unsigned long pending; 330 unsigned long pending;
316 331
317 struct scsi_core core; 332 struct scsi_core core;
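The libsas change above wraps each work_struct of the per-event arrays in a small structure carrying a back-pointer, since a handler shared by every array slot can no longer be handed the parent object as a separate data argument. A hedged sketch of the same wrapper idea (hypothetical names):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct foo_host;

struct foo_event {
        struct work_struct      work;
        struct foo_host         *host;          /* back-pointer the old 'data'
                                                 * argument used to provide */
};

struct foo_host {
        int                     id;
        struct foo_event        events[4];
};

static void foo_handle_event(struct work_struct *work)
{
        struct foo_event *ev = container_of(work, struct foo_event, work);

        printk(KERN_DEBUG "event on host %d\n", ev->host->id);
}

static void foo_host_init(struct foo_host *host)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(host->events); i++) {
                INIT_WORK(&host->events[i].work, foo_handle_event);
                host->events[i].host = host;
        }
}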
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index fd352323378b..798f7c7ee426 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -206,9 +206,9 @@ struct fc_rport { /* aka fc_starget_attrs */
206 u8 flags; 206 u8 flags;
207 struct list_head peers; 207 struct list_head peers;
208 struct device dev; 208 struct device dev;
209 struct work_struct dev_loss_work; 209 struct delayed_work dev_loss_work;
210 struct work_struct scan_work; 210 struct work_struct scan_work;
211 struct work_struct fail_io_work; 211 struct delayed_work fail_io_work;
212 struct work_struct stgt_delete_work; 212 struct work_struct stgt_delete_work;
213 struct work_struct rport_delete_work; 213 struct work_struct rport_delete_work;
214} __attribute__((aligned(sizeof(unsigned long)))); 214} __attribute__((aligned(sizeof(unsigned long))));
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 4b95c89c95c9..d5c218ddc527 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -176,7 +176,7 @@ struct iscsi_cls_session {
176 176
177 /* recovery fields */ 177 /* recovery fields */
178 int recovery_tmo; 178 int recovery_tmo;
179 struct work_struct recovery_work; 179 struct delayed_work recovery_work;
180 180
181 int target_id; 181 int target_id;
182 182
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 4c43521cc493..33720397a904 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -511,7 +511,7 @@ struct snd_ac97 {
511#ifdef CONFIG_SND_AC97_POWER_SAVE 511#ifdef CONFIG_SND_AC97_POWER_SAVE
512 unsigned int power_up; /* power states */ 512 unsigned int power_up; /* power states */
513 struct workqueue_struct *power_workq; 513 struct workqueue_struct *power_workq;
514 struct work_struct power_work; 514 struct delayed_work power_work;
515#endif 515#endif
516 struct device dev; 516 struct device dev;
517}; 517};
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 11702aa0bea9..2ee061625fd0 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -182,7 +182,7 @@ struct ak4114 {
182 unsigned char rcs0; 182 unsigned char rcs0;
183 unsigned char rcs1; 183 unsigned char rcs1;
184 struct workqueue_struct *workqueue; 184 struct workqueue_struct *workqueue;
185 struct work_struct work; 185 struct delayed_work work;
186 void *change_callback_private; 186 void *change_callback_private;
187 void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1); 187 void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1);
188}; 188};
diff --git a/ipc/util.c b/ipc/util.c
index cd8bb14a431f..a9b7a227b8d4 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -514,6 +514,11 @@ void ipc_rcu_getref(void *ptr)
514 container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; 514 container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
515} 515}
516 516
517static void ipc_do_vfree(struct work_struct *work)
518{
519 vfree(container_of(work, struct ipc_rcu_sched, work));
520}
521
517/** 522/**
518 * ipc_schedule_free - free ipc + rcu space 523 * ipc_schedule_free - free ipc + rcu space
519 * @head: RCU callback structure for queued work 524 * @head: RCU callback structure for queued work
@@ -528,7 +533,7 @@ static void ipc_schedule_free(struct rcu_head *head)
528 struct ipc_rcu_sched *sched = 533 struct ipc_rcu_sched *sched =
529 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); 534 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
530 535
531 INIT_WORK(&sched->work, vfree, sched); 536 INIT_WORK(&sched->work, ipc_do_vfree);
532 schedule_work(&sched->work); 537 schedule_work(&sched->work);
533} 538}
534 539
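The ipc_do_vfree() helper added above is needed because a work function can no longer be an arbitrary void-pointer function such as vfree(); a thin adapter recovers the allocation from the embedded work_struct instead. The same trick in isolation (hypothetical type):

#include <linux/workqueue.h>
#include <linux/vmalloc.h>

struct foo_deferred_free {
        struct work_struct      work;
        /* payload lives in the remainder of the same vmalloc()ed block */
};

static void foo_do_vfree(struct work_struct *work)
{
        /* container_of() turns the work pointer back into the block to free */
        vfree(container_of(work, struct foo_deferred_free, work));
}

static void foo_schedule_free(struct foo_deferred_free *p)
{
        INIT_WORK(&p->work, foo_do_vfree);
        schedule_work(&p->work);
}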
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2b76dee28496..8d2bea09a4ec 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module);
114#endif /* CONFIG_KMOD */ 114#endif /* CONFIG_KMOD */
115 115
116struct subprocess_info { 116struct subprocess_info {
117 struct work_struct work;
117 struct completion *complete; 118 struct completion *complete;
118 char *path; 119 char *path;
119 char **argv; 120 char **argv;
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data)
221} 222}
222 223
223/* This is run by khelper thread */ 224/* This is run by khelper thread */
224static void __call_usermodehelper(void *data) 225static void __call_usermodehelper(struct work_struct *work)
225{ 226{
226 struct subprocess_info *sub_info = data; 227 struct subprocess_info *sub_info =
228 container_of(work, struct subprocess_info, work);
227 pid_t pid; 229 pid_t pid;
228 int wait = sub_info->wait; 230 int wait = sub_info->wait;
229 231
@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
264{ 266{
265 DECLARE_COMPLETION_ONSTACK(done); 267 DECLARE_COMPLETION_ONSTACK(done);
266 struct subprocess_info sub_info = { 268 struct subprocess_info sub_info = {
269 .work = __WORK_INITIALIZER(sub_info.work,
270 __call_usermodehelper),
267 .complete = &done, 271 .complete = &done,
268 .path = path, 272 .path = path,
269 .argv = argv, 273 .argv = argv,
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
272 .wait = wait, 276 .wait = wait,
273 .retval = 0, 277 .retval = 0,
274 }; 278 };
275 DECLARE_WORK(work, __call_usermodehelper, &sub_info);
276 279
277 if (!khelper_wq) 280 if (!khelper_wq)
278 return -EBUSY; 281 return -EBUSY;
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
280 if (path[0] == '\0') 283 if (path[0] == '\0')
281 return 0; 284 return 0;
282 285
283 queue_work(khelper_wq, &work); 286 queue_work(khelper_wq, &sub_info.work);
284 wait_for_completion(&done); 287 wait_for_completion(&done);
285 return sub_info.retval; 288 return sub_info.retval;
286} 289}
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
291{ 294{
292 DECLARE_COMPLETION(done); 295 DECLARE_COMPLETION(done);
293 struct subprocess_info sub_info = { 296 struct subprocess_info sub_info = {
297 .work = __WORK_INITIALIZER(sub_info.work,
298 __call_usermodehelper),
294 .complete = &done, 299 .complete = &done,
295 .path = path, 300 .path = path,
296 .argv = argv, 301 .argv = argv,
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
298 .retval = 0, 303 .retval = 0,
299 }; 304 };
300 struct file *f; 305 struct file *f;
301 DECLARE_WORK(work, __call_usermodehelper, &sub_info);
302 306
303 if (!khelper_wq) 307 if (!khelper_wq)
304 return -EBUSY; 308 return -EBUSY;
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
318 } 322 }
319 sub_info.stdin = f; 323 sub_info.stdin = f;
320 324
321 queue_work(khelper_wq, &work); 325 queue_work(khelper_wq, &sub_info.work);
322 wait_for_completion(&done); 326 wait_for_completion(&done);
323 return sub_info.retval; 327 return sub_info.retval;
324} 328}
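kmod.c above shows the on-stack idiom: the work item moves into the structure it serves and is set up with __WORK_INITIALIZER() naming its own member, replacing the old separate DECLARE_WORK(..., &sub_info). Roughly, with invented names:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct foo_request {                            /* hypothetical */
        struct work_struct      work;
        struct completion       *complete;
        int                     retval;
};

static void foo_worker(struct work_struct *work)
{
        struct foo_request *req = container_of(work, struct foo_request, work);

        req->retval = 0;                        /* the real job would go here */
        complete(req->complete);
}

static int foo_run(struct workqueue_struct *wq)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct foo_request req = {
                .work           = __WORK_INITIALIZER(req.work, foo_worker),
                .complete       = &done,
        };

        queue_work(wq, &req.work);
        wait_for_completion(&done);
        return req.retval;
}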
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60ef95e8..1db8c72d0d38 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@ struct kthread_create_info
31 /* Result passed back to kthread_create() from keventd. */ 31 /* Result passed back to kthread_create() from keventd. */
32 struct task_struct *result; 32 struct task_struct *result;
33 struct completion done; 33 struct completion done;
34
35 struct work_struct work;
34}; 36};
35 37
36struct kthread_stop_info 38struct kthread_stop_info
@@ -111,9 +113,10 @@ static int kthread(void *_create)
111} 113}
112 114
113/* We are keventd: create a thread. */ 115/* We are keventd: create a thread. */
114static void keventd_create_kthread(void *_create) 116static void keventd_create_kthread(struct work_struct *work)
115{ 117{
116 struct kthread_create_info *create = _create; 118 struct kthread_create_info *create =
119 container_of(work, struct kthread_create_info, work);
117 int pid; 120 int pid;
118 121
119 /* We want our own signal handler (we take no signals by default). */ 122 /* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
154 ...) 157 ...)
155{ 158{
156 struct kthread_create_info create; 159 struct kthread_create_info create;
157 DECLARE_WORK(work, keventd_create_kthread, &create);
158 160
159 create.threadfn = threadfn; 161 create.threadfn = threadfn;
160 create.data = data; 162 create.data = data;
161 init_completion(&create.started); 163 init_completion(&create.started);
162 init_completion(&create.done); 164 init_completion(&create.done);
165 INIT_WORK(&create.work, keventd_create_kthread);
163 166
164 /* 167 /*
165 * The workqueue needs to start up first: 168 * The workqueue needs to start up first:
166 */ 169 */
167 if (!helper_wq) 170 if (!helper_wq)
168 work.func(work.data); 171 create.work.func(&create.work);
169 else { 172 else {
170 queue_work(helper_wq, &work); 173 queue_work(helper_wq, &create.work);
171 wait_for_completion(&create.done); 174 wait_for_completion(&create.done);
172 } 175 }
173 if (!IS_ERR(create.result)) { 176 if (!IS_ERR(create.result)) {
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900ac3164..678ec736076b 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
16 * callback we use. 16 * callback we use.
17 */ 17 */
18 18
19static void do_poweroff(void *dummy) 19static void do_poweroff(struct work_struct *dummy)
20{ 20{
21 kernel_power_off(); 21 kernel_power_off();
22} 22}
23 23
24static DECLARE_WORK(poweroff_work, do_poweroff, NULL); 24static DECLARE_WORK(poweroff_work, do_poweroff);
25 25
26static void handle_poweroff(int key, struct tty_struct *tty) 26static void handle_poweroff(int key, struct tty_struct *tty)
27{ 27{
diff --git a/kernel/relay.c b/kernel/relay.c
index f04bbdb56ac2..2b92e8ece85b 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -308,9 +308,10 @@ static struct rchan_callbacks default_channel_callbacks = {
308 * reason waking is deferred is that calling directly from write 308 * reason waking is deferred is that calling directly from write
309 * causes problems if you're writing from say the scheduler. 309 * causes problems if you're writing from say the scheduler.
310 */ 310 */
311static void wakeup_readers(void *private) 311static void wakeup_readers(struct work_struct *work)
312{ 312{
313 struct rchan_buf *buf = private; 313 struct rchan_buf *buf =
314 container_of(work, struct rchan_buf, wake_readers.work);
314 wake_up_interruptible(&buf->read_wait); 315 wake_up_interruptible(&buf->read_wait);
315} 316}
316 317
@@ -328,7 +329,7 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
328 if (init) { 329 if (init) {
329 init_waitqueue_head(&buf->read_wait); 330 init_waitqueue_head(&buf->read_wait);
330 kref_init(&buf->kref); 331 kref_init(&buf->kref);
331 INIT_WORK(&buf->wake_readers, NULL, NULL); 332 INIT_DELAYED_WORK(&buf->wake_readers, NULL);
332 } else { 333 } else {
333 cancel_delayed_work(&buf->wake_readers); 334 cancel_delayed_work(&buf->wake_readers);
334 flush_scheduled_work(); 335 flush_scheduled_work();
@@ -549,7 +550,8 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
549 buf->padding[old_subbuf]; 550 buf->padding[old_subbuf];
550 smp_mb(); 551 smp_mb();
551 if (waitqueue_active(&buf->read_wait)) { 552 if (waitqueue_active(&buf->read_wait)) {
552 PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf); 553 PREPARE_DELAYED_WORK(&buf->wake_readers,
554 wakeup_readers);
553 schedule_delayed_work(&buf->wake_readers, 1); 555 schedule_delayed_work(&buf->wake_readers, 1);
554 } 556 }
555 } 557 }
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d82801b..c87b461de38d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
880 return 0; 880 return 0;
881} 881}
882 882
883static void deferred_cad(void *dummy) 883static void deferred_cad(struct work_struct *dummy)
884{ 884{
885 kernel_restart(NULL); 885 kernel_restart(NULL);
886} 886}
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy)
892 */ 892 */
893void ctrl_alt_del(void) 893void ctrl_alt_del(void)
894{ 894{
895 static DECLARE_WORK(cad_work, deferred_cad, NULL); 895 static DECLARE_WORK(cad_work, deferred_cad);
896 896
897 if (C_A_D) 897 if (C_A_D)
898 schedule_work(&cad_work); 898 schedule_work(&cad_work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d2c27..8d1e7cb8a51a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -80,6 +80,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
80 return list_empty(&wq->list); 80 return list_empty(&wq->list);
81} 81}
82 82
83static inline void set_wq_data(struct work_struct *work, void *wq)
84{
85 unsigned long new, old, res;
86
87 /* assume the pending flag is already set and that the task has already
88 * been queued on this workqueue */
89 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
90 res = work->management;
91 if (res != new) {
92 do {
93 old = res;
94 new = (unsigned long) wq;
95 new |= (old & WORK_STRUCT_FLAG_MASK);
96 res = cmpxchg(&work->management, old, new);
97 } while (res != old);
98 }
99}
100
101static inline void *get_wq_data(struct work_struct *work)
102{
103 return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
104}
105
83/* Preempt must be disabled. */ 106/* Preempt must be disabled. */
84static void __queue_work(struct cpu_workqueue_struct *cwq, 107static void __queue_work(struct cpu_workqueue_struct *cwq,
85 struct work_struct *work) 108 struct work_struct *work)
@@ -87,7 +110,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
87 unsigned long flags; 110 unsigned long flags;
88 111
89 spin_lock_irqsave(&cwq->lock, flags); 112 spin_lock_irqsave(&cwq->lock, flags);
90 work->wq_data = cwq; 113 set_wq_data(work, cwq);
91 list_add_tail(&work->entry, &cwq->worklist); 114 list_add_tail(&work->entry, &cwq->worklist);
92 cwq->insert_sequence++; 115 cwq->insert_sequence++;
93 wake_up(&cwq->more_work); 116 wake_up(&cwq->more_work);
@@ -108,7 +131,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
108{ 131{
109 int ret = 0, cpu = get_cpu(); 132 int ret = 0, cpu = get_cpu();
110 133
111 if (!test_and_set_bit(0, &work->pending)) { 134 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
112 if (unlikely(is_single_threaded(wq))) 135 if (unlikely(is_single_threaded(wq)))
113 cpu = singlethread_cpu; 136 cpu = singlethread_cpu;
114 BUG_ON(!list_empty(&work->entry)); 137 BUG_ON(!list_empty(&work->entry));
@@ -122,38 +145,42 @@ EXPORT_SYMBOL_GPL(queue_work);
122 145
123static void delayed_work_timer_fn(unsigned long __data) 146static void delayed_work_timer_fn(unsigned long __data)
124{ 147{
125 struct work_struct *work = (struct work_struct *)__data; 148 struct delayed_work *dwork = (struct delayed_work *)__data;
126 struct workqueue_struct *wq = work->wq_data; 149 struct workqueue_struct *wq = get_wq_data(&dwork->work);
127 int cpu = smp_processor_id(); 150 int cpu = smp_processor_id();
128 151
129 if (unlikely(is_single_threaded(wq))) 152 if (unlikely(is_single_threaded(wq)))
130 cpu = singlethread_cpu; 153 cpu = singlethread_cpu;
131 154
132 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 155 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
133} 156}
134 157
135/** 158/**
136 * queue_delayed_work - queue work on a workqueue after delay 159 * queue_delayed_work - queue work on a workqueue after delay
137 * @wq: workqueue to use 160 * @wq: workqueue to use
138 * @work: work to queue 161 * @work: delayable work to queue
139 * @delay: number of jiffies to wait before queueing 162 * @delay: number of jiffies to wait before queueing
140 * 163 *
141 * Returns 0 if @work was already on a queue, non-zero otherwise. 164 * Returns 0 if @work was already on a queue, non-zero otherwise.
142 */ 165 */
143int fastcall queue_delayed_work(struct workqueue_struct *wq, 166int fastcall queue_delayed_work(struct workqueue_struct *wq,
144 struct work_struct *work, unsigned long delay) 167 struct delayed_work *dwork, unsigned long delay)
145{ 168{
146 int ret = 0; 169 int ret = 0;
147 struct timer_list *timer = &work->timer; 170 struct timer_list *timer = &dwork->timer;
171 struct work_struct *work = &dwork->work;
172
173 if (delay == 0)
174 return queue_work(wq, work);
148 175
149 if (!test_and_set_bit(0, &work->pending)) { 176 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
150 BUG_ON(timer_pending(timer)); 177 BUG_ON(timer_pending(timer));
151 BUG_ON(!list_empty(&work->entry)); 178 BUG_ON(!list_empty(&work->entry));
152 179
153 /* This stores wq for the moment, for the timer_fn */ 180 /* This stores wq for the moment, for the timer_fn */
154 work->wq_data = wq; 181 set_wq_data(work, wq);
155 timer->expires = jiffies + delay; 182 timer->expires = jiffies + delay;
156 timer->data = (unsigned long)work; 183 timer->data = (unsigned long)dwork;
157 timer->function = delayed_work_timer_fn; 184 timer->function = delayed_work_timer_fn;
158 add_timer(timer); 185 add_timer(timer);
159 ret = 1; 186 ret = 1;
@@ -172,19 +199,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
172 * Returns 0 if @work was already on a queue, non-zero otherwise. 199 * Returns 0 if @work was already on a queue, non-zero otherwise.
173 */ 200 */
174int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 201int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
175 struct work_struct *work, unsigned long delay) 202 struct delayed_work *dwork, unsigned long delay)
176{ 203{
177 int ret = 0; 204 int ret = 0;
178 struct timer_list *timer = &work->timer; 205 struct timer_list *timer = &dwork->timer;
206 struct work_struct *work = &dwork->work;
179 207
180 if (!test_and_set_bit(0, &work->pending)) { 208 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
181 BUG_ON(timer_pending(timer)); 209 BUG_ON(timer_pending(timer));
182 BUG_ON(!list_empty(&work->entry)); 210 BUG_ON(!list_empty(&work->entry));
183 211
184 /* This stores wq for the moment, for the timer_fn */ 212 /* This stores wq for the moment, for the timer_fn */
185 work->wq_data = wq; 213 set_wq_data(work, wq);
186 timer->expires = jiffies + delay; 214 timer->expires = jiffies + delay;
187 timer->data = (unsigned long)work; 215 timer->data = (unsigned long)dwork;
188 timer->function = delayed_work_timer_fn; 216 timer->function = delayed_work_timer_fn;
189 add_timer_on(timer, cpu); 217 add_timer_on(timer, cpu);
190 ret = 1; 218 ret = 1;
@@ -212,15 +240,15 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
212 while (!list_empty(&cwq->worklist)) { 240 while (!list_empty(&cwq->worklist)) {
213 struct work_struct *work = list_entry(cwq->worklist.next, 241 struct work_struct *work = list_entry(cwq->worklist.next,
214 struct work_struct, entry); 242 struct work_struct, entry);
215 void (*f) (void *) = work->func; 243 work_func_t f = work->func;
216 void *data = work->data;
217 244
218 list_del_init(cwq->worklist.next); 245 list_del_init(cwq->worklist.next);
219 spin_unlock_irqrestore(&cwq->lock, flags); 246 spin_unlock_irqrestore(&cwq->lock, flags);
220 247
221 BUG_ON(work->wq_data != cwq); 248 BUG_ON(get_wq_data(work) != cwq);
222 clear_bit(0, &work->pending); 249 if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
223 f(data); 250 work_release(work);
251 f(work);
224 252
225 spin_lock_irqsave(&cwq->lock, flags); 253 spin_lock_irqsave(&cwq->lock, flags);
226 cwq->remove_sequence++; 254 cwq->remove_sequence++;
@@ -468,38 +496,37 @@ EXPORT_SYMBOL(schedule_work);
468 496
469/** 497/**
470 * schedule_delayed_work - put work task in global workqueue after delay 498 * schedule_delayed_work - put work task in global workqueue after delay
471 * @work: job to be done 499 * @dwork: job to be done
472 * @delay: number of jiffies to wait 500 * @delay: number of jiffies to wait or 0 for immediate execution
473 * 501 *
474 * After waiting for a given time this puts a job in the kernel-global 502 * After waiting for a given time this puts a job in the kernel-global
475 * workqueue. 503 * workqueue.
476 */ 504 */
477int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) 505int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
478{ 506{
479 return queue_delayed_work(keventd_wq, work, delay); 507 return queue_delayed_work(keventd_wq, dwork, delay);
480} 508}
481EXPORT_SYMBOL(schedule_delayed_work); 509EXPORT_SYMBOL(schedule_delayed_work);
482 510
483/** 511/**
484 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 512 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
485 * @cpu: cpu to use 513 * @cpu: cpu to use
486 * @work: job to be done 514 * @dwork: job to be done
487 * @delay: number of jiffies to wait 515 * @delay: number of jiffies to wait
488 * 516 *
489 * After waiting for a given time this puts a job in the kernel-global 517 * After waiting for a given time this puts a job in the kernel-global
490 * workqueue on the specified CPU. 518 * workqueue on the specified CPU.
491 */ 519 */
492int schedule_delayed_work_on(int cpu, 520int schedule_delayed_work_on(int cpu,
493 struct work_struct *work, unsigned long delay) 521 struct delayed_work *dwork, unsigned long delay)
494{ 522{
495 return queue_delayed_work_on(cpu, keventd_wq, work, delay); 523 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
496} 524}
497EXPORT_SYMBOL(schedule_delayed_work_on); 525EXPORT_SYMBOL(schedule_delayed_work_on);
498 526
499/** 527/**
500 * schedule_on_each_cpu - call a function on each online CPU from keventd 528 * schedule_on_each_cpu - call a function on each online CPU from keventd
501 * @func: the function to call 529 * @func: the function to call
502 * @info: a pointer to pass to func()
503 * 530 *
504 * Returns zero on success. 531 * Returns zero on success.
505 * Returns -ve errno on failure. 532 * Returns -ve errno on failure.
@@ -508,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
508 * 535 *
509 * schedule_on_each_cpu() is very slow. 536 * schedule_on_each_cpu() is very slow.
510 */ 537 */
511int schedule_on_each_cpu(void (*func)(void *info), void *info) 538int schedule_on_each_cpu(work_func_t func)
512{ 539{
513 int cpu; 540 int cpu;
514 struct work_struct *works; 541 struct work_struct *works;
@@ -519,7 +546,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
519 546
520 mutex_lock(&workqueue_mutex); 547 mutex_lock(&workqueue_mutex);
521 for_each_online_cpu(cpu) { 548 for_each_online_cpu(cpu) {
522 INIT_WORK(per_cpu_ptr(works, cpu), func, info); 549 INIT_WORK(per_cpu_ptr(works, cpu), func);
523 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), 550 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
524 per_cpu_ptr(works, cpu)); 551 per_cpu_ptr(works, cpu));
525 } 552 }
@@ -539,12 +566,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
539 * cancel_rearming_delayed_workqueue - reliably kill off a delayed 566 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
540 * work whose handler rearms the delayed work. 567 * work whose handler rearms the delayed work.
541 * @wq: the controlling workqueue structure 568 * @wq: the controlling workqueue structure
542 * @work: the delayed work struct 569 * @dwork: the delayed work struct
543 */ 570 */
544void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, 571void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
545 struct work_struct *work) 572 struct delayed_work *dwork)
546{ 573{
547 while (!cancel_delayed_work(work)) 574 while (!cancel_delayed_work(dwork))
548 flush_workqueue(wq); 575 flush_workqueue(wq);
549} 576}
550EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); 577EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,18 +579,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
552/** 579/**
553 * cancel_rearming_delayed_work - reliably kill off a delayed keventd 580 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
554 * work whose handler rearms the delayed work. 581 * work whose handler rearms the delayed work.
555 * @work: the delayed work struct 582 * @dwork: the delayed work struct
556 */ 583 */
557void cancel_rearming_delayed_work(struct work_struct *work) 584void cancel_rearming_delayed_work(struct delayed_work *dwork)
558{ 585{
559 cancel_rearming_delayed_workqueue(keventd_wq, work); 586 cancel_rearming_delayed_workqueue(keventd_wq, dwork);
560} 587}
561EXPORT_SYMBOL(cancel_rearming_delayed_work); 588EXPORT_SYMBOL(cancel_rearming_delayed_work);
562 589
563/** 590/**
564 * execute_in_process_context - reliably execute the routine with user context 591 * execute_in_process_context - reliably execute the routine with user context
565 * @fn: the function to execute 592 * @fn: the function to execute
566 * @data: data to pass to the function
567 * @ew: guaranteed storage for the execute work structure (must 593 * @ew: guaranteed storage for the execute work structure (must
568 * be available when the work executes) 594 * be available when the work executes)
569 * 595 *
@@ -573,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
573 * Returns: 0 - function was executed 599 * Returns: 0 - function was executed
574 * 1 - function was scheduled for execution 600 * 1 - function was scheduled for execution
575 */ 601 */
576int execute_in_process_context(void (*fn)(void *data), void *data, 602int execute_in_process_context(work_func_t fn, struct execute_work *ew)
577 struct execute_work *ew)
578{ 603{
579 if (!in_interrupt()) { 604 if (!in_interrupt()) {
580 fn(data); 605 fn(&ew->work);
581 return 0; 606 return 0;
582 } 607 }
583 608
584 INIT_WORK(&ew->work, fn, data); 609 INIT_WORK(&ew->work, fn);
585 schedule_work(&ew->work); 610 schedule_work(&ew->work);
586 611
587 return 1; 612 return 1;
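The kernel/workqueue.c changes carry the core of the rework: the queue pointer and the pending/no-auto-release bits share a single "management" word (hence the cmpxchg loop in set_wq_data()), delayed work moves into its own wrapper type, and queue_delayed_work() with a zero delay falls through to queue_work(). A simplified, illustrative picture of the types this relies on; the real definitions live in include/linux/workqueue.h and carry additional flag bits:

typedef void (*work_func_t)(struct work_struct *work);

struct work_struct {
	unsigned long management;	/* cwq pointer | WORK_STRUCT_* flag bits */
	struct list_head entry;		/* link on the per-CPU worklist */
	work_func_t func;		/* called as func(&work) */
};

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;	/* fires delayed_work_timer_fn() */
};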
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e34eddc..5de81473df34 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
313static void free_block(struct kmem_cache *cachep, void **objpp, int len, 313static void free_block(struct kmem_cache *cachep, void **objpp, int len,
314 int node); 314 int node);
315static int enable_cpucache(struct kmem_cache *cachep); 315static int enable_cpucache(struct kmem_cache *cachep);
316static void cache_reap(void *unused); 316static void cache_reap(struct work_struct *unused);
317 317
318/* 318/*
319 * This function must be completely optimized away if a constant is passed to 319 * This function must be completely optimized away if a constant is passed to
@@ -753,7 +753,7 @@ int slab_is_available(void)
753 return g_cpucache_up == FULL; 753 return g_cpucache_up == FULL;
754} 754}
755 755
756static DEFINE_PER_CPU(struct work_struct, reap_work); 756static DEFINE_PER_CPU(struct delayed_work, reap_work);
757 757
758static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 758static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
759{ 759{
@@ -916,16 +916,16 @@ static void next_reap_node(void)
916 */ 916 */
917static void __devinit start_cpu_timer(int cpu) 917static void __devinit start_cpu_timer(int cpu)
918{ 918{
919 struct work_struct *reap_work = &per_cpu(reap_work, cpu); 919 struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
920 920
921 /* 921 /*
922 * When this gets called from do_initcalls via cpucache_init(), 922 * When this gets called from do_initcalls via cpucache_init(),
923 * init_workqueues() has already run, so keventd will be setup 923 * init_workqueues() has already run, so keventd will be setup
924 * at that time. 924 * at that time.
925 */ 925 */
926 if (keventd_up() && reap_work->func == NULL) { 926 if (keventd_up() && reap_work->work.func == NULL) {
927 init_reap_node(cpu); 927 init_reap_node(cpu);
928 INIT_WORK(reap_work, cache_reap, NULL); 928 INIT_DELAYED_WORK(reap_work, cache_reap);
929 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); 929 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
930 } 930 }
931} 931}
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3815 * If we cannot acquire the cache chain mutex then just give up - we'll try 3815 * If we cannot acquire the cache chain mutex then just give up - we'll try
3816 * again on the next iteration. 3816 * again on the next iteration.
3817 */ 3817 */
3818static void cache_reap(void *unused) 3818static void cache_reap(struct work_struct *unused)
3819{ 3819{
3820 struct kmem_cache *searchp; 3820 struct kmem_cache *searchp;
3821 struct kmem_list3 *l3; 3821 struct kmem_list3 *l3;
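The slab conversion is the per-CPU flavour of the same change: the per-CPU item becomes a struct delayed_work, initialised with INIT_DELAYED_WORK() and queued with schedule_delayed_work_on(). A minimal sketch of that pattern (names are illustrative, not the slab ones):

#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct delayed_work, my_reap_work);

static void my_reap(struct work_struct *unused)
{
	/* periodic per-CPU housekeeping, then re-arm ourselves */
	schedule_delayed_work(&__get_cpu_var(my_reap_work), HZ);
}

static void my_start_cpu_timer(int cpu)
{
	struct delayed_work *dwork = &per_cpu(my_reap_work, cpu);

	INIT_DELAYED_WORK(dwork, my_reap);
	/* stagger the first run so CPUs do not reap in lock-step */
	schedule_delayed_work_on(cpu, dwork, HZ + 3 * cpu);
}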
diff --git a/mm/swap.c b/mm/swap.c
index 2e0e871f542f..d9a3770d8f3c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -216,7 +216,7 @@ void lru_add_drain(void)
216} 216}
217 217
218#ifdef CONFIG_NUMA 218#ifdef CONFIG_NUMA
219static void lru_add_drain_per_cpu(void *dummy) 219static void lru_add_drain_per_cpu(struct work_struct *dummy)
220{ 220{
221 lru_add_drain(); 221 lru_add_drain();
222} 222}
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
226 */ 226 */
227int lru_add_drain_all(void) 227int lru_add_drain_all(void)
228{ 228{
229 return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL); 229 return schedule_on_each_cpu(lru_add_drain_per_cpu);
230} 230}
231 231
232#else 232#else
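schedule_on_each_cpu() loses its info pointer along with everything else; handlers that need no argument, like lru_add_drain_per_cpu above, just take and ignore the work_struct. Illustrative use (names are stand-ins):

#include <linux/workqueue.h>

static void my_drain(struct work_struct *unused)
{
	/* runs once on each online CPU, from keventd */
}

static int my_drain_all(void)
{
	return schedule_on_each_cpu(my_drain);
}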
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5946ec63724f..3fc0abeeaf34 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
1454 1454
1455#define LEC_ARP_REFRESH_INTERVAL (3*HZ) 1455#define LEC_ARP_REFRESH_INTERVAL (3*HZ)
1456 1456
1457static void lec_arp_check_expire(void *data); 1457static void lec_arp_check_expire(struct work_struct *work);
1458static void lec_arp_expire_arp(unsigned long data); 1458static void lec_arp_expire_arp(unsigned long data);
1459 1459
1460/* 1460/*
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv)
1477 INIT_HLIST_HEAD(&priv->lec_no_forward); 1477 INIT_HLIST_HEAD(&priv->lec_no_forward);
1478 INIT_HLIST_HEAD(&priv->mcast_fwds); 1478 INIT_HLIST_HEAD(&priv->mcast_fwds);
1479 spin_lock_init(&priv->lec_arp_lock); 1479 spin_lock_init(&priv->lec_arp_lock);
1480 INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv); 1480 INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
1481 schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); 1481 schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
1482} 1482}
1483 1483
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data)
1875 * to ESI_FORWARD_DIRECT. This causes the flush period to end 1875 * to ESI_FORWARD_DIRECT. This causes the flush period to end
1876 * regardless of the progress of the flush protocol. 1876 * regardless of the progress of the flush protocol.
1877 */ 1877 */
1878static void lec_arp_check_expire(void *data) 1878static void lec_arp_check_expire(struct work_struct *work)
1879{ 1879{
1880 unsigned long flags; 1880 unsigned long flags;
1881 struct lec_priv *priv = data; 1881 struct lec_priv *priv =
1882 container_of(work, struct lec_priv, lec_arp_work.work);
1882 struct hlist_node *node, *next; 1883 struct hlist_node *node, *next;
1883 struct lec_arp_table *entry; 1884 struct lec_arp_table *entry;
1884 unsigned long now; 1885 unsigned long now;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 24cc95f86741..99136babd535 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@ struct lec_priv {
92 spinlock_t lec_arp_lock; 92 spinlock_t lec_arp_lock;
93 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ 93 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */
94 struct atm_vcc *lecd; 94 struct atm_vcc *lecd;
95 struct work_struct lec_arp_work; /* C10 */ 95 struct delayed_work lec_arp_work; /* C10 */
96 unsigned int maximum_unknown_frame_count; 96 unsigned int maximum_unknown_frame_count;
97 /* 97 /*
98 * Within the period of time defined by this variable, the client will send 98 * Within the period of time defined by this variable, the client will send
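The LANE ARP worker is the self-rearming periodic case: the handler recovers its private structure through the embedded delayed_work and queues itself again when it is done. A hypothetical equivalent of that shape:

#include <linux/workqueue.h>

#define MY_REFRESH_INTERVAL	(3 * HZ)

struct my_priv {
	struct delayed_work arp_work;
};

static void my_arp_check_expire(struct work_struct *work)
{
	struct my_priv *priv =
		container_of(work, struct my_priv, arp_work.work);

	/* ... walk the tables, expire stale entries ... */

	schedule_delayed_work(&priv->arp_work, MY_REFRESH_INTERVAL);
}

static void my_arp_init(struct my_priv *priv)
{
	INIT_DELAYED_WORK(&priv->arp_work, my_arp_check_expire);
	schedule_delayed_work(&priv->arp_work, MY_REFRESH_INTERVAL);
}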
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a86e75..d4c935692ccf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev)
237 kfree(data); 237 kfree(data);
238} 238}
239 239
240static void add_conn(void *data) 240static void add_conn(struct work_struct *work)
241{ 241{
242 struct hci_conn *conn = data; 242 struct hci_conn *conn = container_of(work, struct hci_conn, work);
243 int i; 243 int i;
244 244
245 if (device_register(&conn->dev) < 0) { 245 if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
272 272
273 dev_set_drvdata(&conn->dev, conn); 273 dev_set_drvdata(&conn->dev, conn);
274 274
275 INIT_WORK(&conn->work, add_conn, (void *) conn); 275 INIT_WORK(&conn->work, add_conn);
276 276
277 schedule_work(&conn->work); 277 schedule_work(&conn->work);
278} 278}
279 279
280static void del_conn(void *data) 280static void del_conn(struct work_struct *work)
281{ 281{
282 struct hci_conn *conn = data; 282 struct hci_conn *conn = container_of(work, struct hci_conn, work);
283 device_del(&conn->dev); 283 device_del(&conn->dev);
284} 284}
285 285
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
287{ 287{
288 BT_DBG("conn %p", conn); 288 BT_DBG("conn %p", conn);
289 289
290 INIT_WORK(&conn->work, del_conn, (void *) conn); 290 INIT_WORK(&conn->work, del_conn);
291 291
292 schedule_work(&conn->work); 292 schedule_work(&conn->work);
293} 293}
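For a plain work_struct embedded in its owner, as in hci_conn above, container_of() names the member directly; the ".work" suffix only appears when the embedded item is a delayed_work. A minimal sketch (types and names are stand-ins):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/workqueue.h>

struct my_conn {
	struct device dev;
	struct work_struct work;
};

static void my_add_conn(struct work_struct *work)
{
	struct my_conn *conn = container_of(work, struct my_conn, work);

	if (device_register(&conn->dev) < 0)
		printk(KERN_ERR "my_conn: device registration failed\n");
}

/* setup: INIT_WORK(&conn->work, my_add_conn); schedule_work(&conn->work); */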
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40c11d2..55bb2634c088 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev)
77 * Called from work queue to allow for calling functions that 77 * Called from work queue to allow for calling functions that
78 * might sleep (such as speed check), and to debounce. 78 * might sleep (such as speed check), and to debounce.
79 */ 79 */
80static void port_carrier_check(void *arg) 80static void port_carrier_check(struct work_struct *work)
81{ 81{
82 struct net_device *dev = arg;
83 struct net_bridge_port *p; 82 struct net_bridge_port *p;
83 struct net_device *dev;
84 struct net_bridge *br; 84 struct net_bridge *br;
85 85
86 dev = container_of(work, struct net_bridge_port,
87 carrier_check.work)->dev;
88 work_release(work);
89
86 rtnl_lock(); 90 rtnl_lock();
87 p = dev->br_port; 91 p = dev->br_port;
88 if (!p) 92 if (!p)
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
276 p->port_no = index; 280 p->port_no = index;
277 br_init_port(p); 281 br_init_port(p);
278 p->state = BR_STATE_DISABLED; 282 p->state = BR_STATE_DISABLED;
279 INIT_WORK(&p->carrier_check, port_carrier_check, dev); 283 INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
280 br_stp_port_timer_init(p); 284 br_stp_port_timer_init(p);
281 285
282 kobject_init(&p->kobj); 286 kobject_init(&p->kobj);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d86f256..3a534e94c7f3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct net_bridge_port
82 struct timer_list hold_timer; 82 struct timer_list hold_timer;
83 struct timer_list message_age_timer; 83 struct timer_list message_age_timer;
84 struct kobject kobj; 84 struct kobject kobj;
85 struct work_struct carrier_check; 85 struct delayed_work carrier_check;
86 struct rcu_head rcu; 86 struct rcu_head rcu;
87}; 87};
88 88
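The bridge port uses the non-auto-release variant that arrives with this rework: with INIT_DELAYED_WORK_NAR() the pending state is not cleared for the handler, so the handler calls work_release() itself once it has pulled what it needs out of the containing object and is willing to be requeued. A sketch of that shape, assuming the *_NAR/work_release API shown in the hunk above; my_port and its fields are hypothetical:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_port {
	struct net_device *dev;
	struct delayed_work carrier_check;
};

static void my_carrier_check(struct work_struct *work)
{
	struct net_device *dev =
		container_of(work, struct my_port, carrier_check.work)->dev;

	work_release(work);	/* clear pending; the item may now be requeued */

	/* ... checks that may sleep, e.g. link speed negotiation state ... */
}

/* setup: INIT_DELAYED_WORK_NAR(&p->carrier_check, my_carrier_check); */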
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..549a2ce951b0 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@ enum lw_bits {
34static unsigned long linkwatch_flags; 34static unsigned long linkwatch_flags;
35static unsigned long linkwatch_nextevent; 35static unsigned long linkwatch_nextevent;
36 36
37static void linkwatch_event(void *dummy); 37static void linkwatch_event(struct work_struct *dummy);
38static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL); 38static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
39 39
40static LIST_HEAD(lweventlist); 40static LIST_HEAD(lweventlist);
41static DEFINE_SPINLOCK(lweventlist_lock); 41static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void)
127} 127}
128 128
129 129
130static void linkwatch_event(void *dummy) 130static void linkwatch_event(struct work_struct *dummy)
131{ 131{
132 /* Limit the number of linkwatch events to one 132 /* Limit the number of linkwatch events to one
133 * per second so that a runaway driver does not 133 * per second so that a runaway driver does not
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
171 unsigned long delay = linkwatch_nextevent - jiffies; 171 unsigned long delay = linkwatch_nextevent - jiffies;
172 172
173 /* If we wrap around we'll delay it by at most HZ. */ 173 /* If we wrap around we'll delay it by at most HZ. */
174 if (!delay || delay > HZ) 174 if (delay > HZ)
175 schedule_work(&linkwatch_work); 175 delay = 0;
176 else 176 schedule_delayed_work(&linkwatch_work, delay);
177 schedule_delayed_work(&linkwatch_work, delay);
178 } 177 }
179 } 178 }
180} 179}
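linkwatch shows the file-scope delayed counterpart of DECLARE_WORK(): DECLARE_DELAYED_WORK() declares the item, and because a zero delay now queues immediately, the old schedule_work()/schedule_delayed_work() branch collapses into a single call with a clamped delay. Illustrative shape (names are stand-ins):

#include <linux/workqueue.h>

static void my_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(my_dwork, my_event);

static void my_event(struct work_struct *dummy)
{
	/* rate-limited event processing */
}

static void my_fire_event(unsigned long delay)
{
	if (delay > HZ)			/* wrapped or too far out */
		delay = 0;
	schedule_delayed_work(&my_dwork, delay);
}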
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3c58846fcaa5..b3c559b9ac35 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,9 +50,10 @@ static atomic_t trapped;
50static void zap_completion_queue(void); 50static void zap_completion_queue(void);
51static void arp_reply(struct sk_buff *skb); 51static void arp_reply(struct sk_buff *skb);
52 52
53static void queue_process(void *p) 53static void queue_process(struct work_struct *work)
54{ 54{
55 struct netpoll_info *npinfo = p; 55 struct netpoll_info *npinfo =
56 container_of(work, struct netpoll_info, tx_work.work);
56 struct sk_buff *skb; 57 struct sk_buff *skb;
57 58
58 while ((skb = skb_dequeue(&npinfo->txq))) { 59 while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@ static void queue_process(void *p)
72 schedule_delayed_work(&npinfo->tx_work, HZ/10); 73 schedule_delayed_work(&npinfo->tx_work, HZ/10);
73 return; 74 return;
74 } 75 }
75
76 netif_tx_unlock_bh(dev);
77 } 76 }
78} 77}
79 78
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
263 262
264 if (status != NETDEV_TX_OK) { 263 if (status != NETDEV_TX_OK) {
265 skb_queue_tail(&npinfo->txq, skb); 264 skb_queue_tail(&npinfo->txq, skb);
266 schedule_work(&npinfo->tx_work); 265 schedule_delayed_work(&npinfo->tx_work,0);
267 } 266 }
268} 267}
269 268
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np)
628 spin_lock_init(&npinfo->rx_lock); 627 spin_lock_init(&npinfo->rx_lock);
629 skb_queue_head_init(&npinfo->arp_tx); 628 skb_queue_head_init(&npinfo->arp_tx);
630 skb_queue_head_init(&npinfo->txq); 629 skb_queue_head_init(&npinfo->txq);
631 INIT_WORK(&npinfo->tx_work, queue_process, npinfo); 630 INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
632 631
633 atomic_set(&npinfo->refcnt, 1); 632 atomic_set(&npinfo->refcnt, 1);
634 } else { 633 } else {
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 7b52f2a03eef..4c9e26775f72 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = {
32 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 32 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
33 (unsigned long)&dccp_death_row), 33 (unsigned long)&dccp_death_row),
34 .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, 34 .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
35 inet_twdr_twkill_work, 35 inet_twdr_twkill_work),
36 &dccp_death_row),
37/* Short-time timewait calendar */ 36/* Short-time timewait calendar */
38 37
39 .twcal_hand = -1, 38 .twcal_hand = -1,
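__WORK_INITIALIZER() drops its data argument in the same way, so statically initialised structures that embed a work item, like the DCCP and TCP death rows, now name only the handler. A minimal illustrative equivalent:

#include <linux/workqueue.h>

struct my_death_row {
	struct work_struct twkill_work;
};

static void my_twkill_work(struct work_struct *work)
{
	/* reap expired timewait sockets */
}

static struct my_death_row my_row = {
	.twkill_work = __WORK_INITIALIZER(my_row.twkill_work,
					  my_twkill_work),
};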
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87a971d..08386c102954 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
58} 58}
59 59
60void 60void
61ieee80211softmac_assoc_timeout(void *d) 61ieee80211softmac_assoc_timeout(struct work_struct *work)
62{ 62{
63 struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; 63 struct ieee80211softmac_device *mac =
64 container_of(work, struct ieee80211softmac_device,
65 associnfo.timeout.work);
64 struct ieee80211softmac_network *n; 66 struct ieee80211softmac_network *n;
65 67
66 mutex_lock(&mac->associnfo.mutex); 68 mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
186 188
187/* This function is called to handle userspace requests (asynchronously) */ 189/* This function is called to handle userspace requests (asynchronously) */
188void 190void
189ieee80211softmac_assoc_work(void *d) 191ieee80211softmac_assoc_work(struct work_struct *work)
190{ 192{
191 struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; 193 struct ieee80211softmac_device *mac =
194 container_of(work, struct ieee80211softmac_device,
195 associnfo.work.work);
192 struct ieee80211softmac_network *found = NULL; 196 struct ieee80211softmac_network *found = NULL;
193 struct ieee80211_network *net = NULL, *best = NULL; 197 struct ieee80211_network *net = NULL, *best = NULL;
194 int bssvalid; 198 int bssvalid;
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
412 network->authenticated = 0; 416 network->authenticated = 0;
413 /* we don't want to do this more than once ... */ 417 /* we don't want to do this more than once ... */
414 network->auth_desynced_once = 1; 418 network->auth_desynced_once = 1;
415 schedule_work(&mac->associnfo.work); 419 schedule_delayed_work(&mac->associnfo.work, 0);
416 break; 420 break;
417 } 421 }
418 default: 422 default:
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
446 ieee80211softmac_disassoc(mac); 450 ieee80211softmac_disassoc(mac);
447 451
448 /* try to reassociate */ 452 /* try to reassociate */
449 schedule_work(&mac->associnfo.work); 453 schedule_delayed_work(&mac->associnfo.work, 0);
450 454
451 return 0; 455 return 0;
452} 456}
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
466 dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); 470 dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
467 return 0; 471 return 0;
468 } 472 }
469 schedule_work(&mac->associnfo.work); 473 schedule_delayed_work(&mac->associnfo.work, 0);
470 474
471 return 0; 475 return 0;
472} 476}
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 0612015f1c78..6012705aa4f8 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
26 26
27#include "ieee80211softmac_priv.h" 27#include "ieee80211softmac_priv.h"
28 28
29static void ieee80211softmac_auth_queue(void *data); 29static void ieee80211softmac_auth_queue(struct work_struct *work);
30 30
31/* Queues an auth request to the desired AP */ 31/* Queues an auth request to the desired AP */
32int 32int
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
54 auth->mac = mac; 54 auth->mac = mac;
55 auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; 55 auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
56 auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; 56 auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
57 INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth); 57 INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
58 58
59 /* Lock (for list) */ 59 /* Lock (for list) */
60 spin_lock_irqsave(&mac->lock, flags); 60 spin_lock_irqsave(&mac->lock, flags);
61 61
62 /* add to list */ 62 /* add to list */
63 list_add_tail(&auth->list, &mac->auth_queue); 63 list_add_tail(&auth->list, &mac->auth_queue);
64 schedule_work(&auth->work); 64 schedule_delayed_work(&auth->work, 0);
65 spin_unlock_irqrestore(&mac->lock, flags); 65 spin_unlock_irqrestore(&mac->lock, flags);
66 66
67 return 0; 67 return 0;
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
70 70
71/* Sends an auth request to the desired AP and handles timeouts */ 71/* Sends an auth request to the desired AP and handles timeouts */
72static void 72static void
73ieee80211softmac_auth_queue(void *data) 73ieee80211softmac_auth_queue(struct work_struct *work)
74{ 74{
75 struct ieee80211softmac_device *mac; 75 struct ieee80211softmac_device *mac;
76 struct ieee80211softmac_auth_queue_item *auth; 76 struct ieee80211softmac_auth_queue_item *auth;
77 struct ieee80211softmac_network *net; 77 struct ieee80211softmac_network *net;
78 unsigned long flags; 78 unsigned long flags;
79 79
80 auth = (struct ieee80211softmac_auth_queue_item *)data; 80 auth = container_of(work, struct ieee80211softmac_auth_queue_item,
81 work.work);
81 net = auth->net; 82 net = auth->net;
82 mac = auth->mac; 83 mac = auth->mac;
83 84
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data)
118 119
119/* Sends a response to an auth challenge (for shared key auth). */ 120/* Sends a response to an auth challenge (for shared key auth). */
120static void 121static void
121ieee80211softmac_auth_challenge_response(void *_aq) 122ieee80211softmac_auth_challenge_response(struct work_struct *work)
122{ 123{
123 struct ieee80211softmac_auth_queue_item *aq = _aq; 124 struct ieee80211softmac_auth_queue_item *aq =
125 container_of(work, struct ieee80211softmac_auth_queue_item,
126 work.work);
124 127
125 /* Send our response */ 128 /* Send our response */
126 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); 129 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
234 * we have obviously already sent the initial auth 237 * we have obviously already sent the initial auth
235 * request. */ 238 * request. */
236 cancel_delayed_work(&aq->work); 239 cancel_delayed_work(&aq->work);
237 INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); 240 INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
238 schedule_work(&aq->work); 241 schedule_delayed_work(&aq->work, 0);
239 spin_unlock_irqrestore(&mac->lock, flags); 242 spin_unlock_irqrestore(&mac->lock, flags);
240 return 0; 243 return 0;
241 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: 244 case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -398,6 +401,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
398 ieee80211softmac_deauth_from_net(mac, net); 401 ieee80211softmac_deauth_from_net(mac, net);
399 402
400 /* let's try to re-associate */ 403 /* let's try to re-associate */
401 schedule_work(&mac->associnfo.work); 404 schedule_delayed_work(&mac->associnfo.work, 0);
402 return 0; 405 return 0;
403} 406}
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2ef666b..b9015656cfb3 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
73 73
74 74
75static void 75static void
76ieee80211softmac_notify_callback(void *d) 76ieee80211softmac_notify_callback(struct work_struct *work)
77{ 77{
78 struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d; 78 struct ieee80211softmac_event *pevent =
79 kfree(d); 79 container_of(work, struct ieee80211softmac_event, work.work);
80 struct ieee80211softmac_event event = *pevent;
81 kfree(pevent);
80 82
81 event.fun(event.mac->dev, event.event_type, event.context); 83 event.fun(event.mac->dev, event.event_type, event.context);
82} 84}
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
99 return -ENOMEM; 101 return -ENOMEM;
100 102
101 eventptr->event_type = event; 103 eventptr->event_type = event;
102 INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr); 104 INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
103 eventptr->fun = fun; 105 eventptr->fun = fun;
104 eventptr->context = context; 106 eventptr->context = context;
105 eventptr->mac = mac; 107 eventptr->mac = mac;
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
170 /* User may have subscribed to ANY event, so 172 /* User may have subscribed to ANY event, so
171 * we tell them which event triggered it. */ 173 * we tell them which event triggered it. */
172 eventptr->event_type = event; 174 eventptr->event_type = event;
173 schedule_work(&eventptr->work); 175 schedule_delayed_work(&eventptr->work, 0);
174 } 176 }
175 } 177 }
176} 178}
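The softmac event path is the one-shot, heap-allocated case: the handler recovers the heap object with container_of(), copies what it needs onto the stack, frees the allocation, and only then runs the callback. A hypothetical equivalent (struct and field names are illustrative):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event {
	struct delayed_work work;
	int event_type;
	void (*fun)(int event_type, void *context);
	void *context;
};

static void my_notify_callback(struct work_struct *work)
{
	struct my_event *pevent =
		container_of(work, struct my_event, work.work);
	struct my_event event = *pevent;	/* copy before freeing */

	kfree(pevent);
	event.fun(event.event_type, event.context);
}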
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f4a471..256207b71dc9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
58 INIT_LIST_HEAD(&softmac->events); 58 INIT_LIST_HEAD(&softmac->events);
59 59
60 mutex_init(&softmac->associnfo.mutex); 60 mutex_init(&softmac->associnfo.mutex);
61 INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac); 61 INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
62 INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac); 62 INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
63 softmac->start_scan = ieee80211softmac_start_scan_implementation; 63 softmac->start_scan = ieee80211softmac_start_scan_implementation;
64 softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; 64 softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
65 softmac->stop_scan = ieee80211softmac_stop_scan_implementation; 65 softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e090b8a7..c0dbe070e548 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
78/* private definitions and prototypes */ 78/* private definitions and prototypes */
79 79
80/*** prototypes from _scan.c */ 80/*** prototypes from _scan.c */
81void ieee80211softmac_scan(void *sm); 81void ieee80211softmac_scan(struct work_struct *work);
82/* for internal use if scanning is needed */ 82/* for internal use if scanning is needed */
83int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); 83int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
84void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); 84void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au
149int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); 149int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
150 150
151/*** prototypes from _assoc.c */ 151/*** prototypes from _assoc.c */
152void ieee80211softmac_assoc_work(void *d); 152void ieee80211softmac_assoc_work(struct work_struct *work);
153int ieee80211softmac_handle_assoc_response(struct net_device * dev, 153int ieee80211softmac_handle_assoc_response(struct net_device * dev,
154 struct ieee80211_assoc_response * resp, 154 struct ieee80211_assoc_response * resp,
155 struct ieee80211_network * network); 155 struct ieee80211_network * network);
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
157 struct ieee80211_disassoc * disassoc); 157 struct ieee80211_disassoc * disassoc);
158int ieee80211softmac_handle_reassoc_req(struct net_device * dev, 158int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
159 struct ieee80211_reassoc_request * reassoc); 159 struct ieee80211_reassoc_request * reassoc);
160void ieee80211softmac_assoc_timeout(void *d); 160void ieee80211softmac_assoc_timeout(struct work_struct *work);
161void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); 161void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
162void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); 162void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
163 163
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item {
207 struct ieee80211softmac_device *mac; /* SoftMAC device */ 207 struct ieee80211softmac_device *mac; /* SoftMAC device */
208 u8 retry; /* Retry limit */ 208 u8 retry; /* Retry limit */
209 u8 state; /* Auth State */ 209 u8 state; /* Auth State */
210 struct work_struct work; /* Work queue */ 210 struct delayed_work work; /* Work queue */
211}; 211};
212 212
213/* scanning information */ 213/* scanning information */
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo {
219 stop:1; 219 stop:1;
220 u8 skip_flags; 220 u8 skip_flags;
221 struct completion finished; 221 struct completion finished;
222 struct work_struct softmac_scan; 222 struct delayed_work softmac_scan;
223 struct ieee80211softmac_device *mac;
223}; 224};
224 225
225/* private event struct */ 226/* private event struct */
@@ -227,7 +228,7 @@ struct ieee80211softmac_event {
227 struct list_head list; 228 struct list_head list;
228 int event_type; 229 int event_type;
229 void *event_context; 230 void *event_context;
230 struct work_struct work; 231 struct delayed_work work;
231 notify_function_ptr fun; 232 notify_function_ptr fun;
232 void *context; 233 void *context;
233 struct ieee80211softmac_device *mac; 234 struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 5507feab32de..0c85d6c24cdb 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
90 90
91 91
92/* internal scanning implementation follows */ 92/* internal scanning implementation follows */
93void ieee80211softmac_scan(void *d) 93void ieee80211softmac_scan(struct work_struct *work)
94{ 94{
95 int invalid_channel; 95 int invalid_channel;
96 u8 current_channel_idx; 96 u8 current_channel_idx;
97 struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d; 97 struct ieee80211softmac_scaninfo *si =
98 struct ieee80211softmac_scaninfo *si = sm->scaninfo; 98 container_of(work, struct ieee80211softmac_scaninfo,
99 softmac_scan.work);
100 struct ieee80211softmac_device *sm = si->mac;
99 unsigned long flags; 101 unsigned long flags;
100 102
101 while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { 103 while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802
146 struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); 148 struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
147 if (unlikely(!info)) 149 if (unlikely(!info))
148 return NULL; 150 return NULL;
149 INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac); 151 INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
152 info->mac = mac;
150 init_completion(&info->finished); 153 init_completion(&info->finished);
151 return info; 154 return info;
152} 155}
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
187 sm->scaninfo->started = 1; 190 sm->scaninfo->started = 1;
188 sm->scaninfo->stop = 0; 191 sm->scaninfo->stop = 0;
189 INIT_COMPLETION(sm->scaninfo->finished); 192 INIT_COMPLETION(sm->scaninfo->finished);
190 schedule_work(&sm->scaninfo->softmac_scan); 193 schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
191 spin_unlock_irqrestore(&sm->lock, flags); 194 spin_unlock_irqrestore(&sm->lock, flags);
192 return 0; 195 return 0;
193} 196}
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a830f7d..2ffaebd21c53 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
122 122
123 sm->associnfo.associating = 1; 123 sm->associnfo.associating = 1;
124 /* queue lower level code to do work (if necessary) */ 124 /* queue lower level code to do work (if necessary) */
125 schedule_work(&sm->associnfo.work); 125 schedule_delayed_work(&sm->associnfo.work, 0);
126out: 126out:
127 mutex_unlock(&sm->associnfo.mutex); 127 mutex_unlock(&sm->associnfo.mutex);
128 128
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
356 /* force reassociation */ 356 /* force reassociation */
357 mac->associnfo.bssvalid = 0; 357 mac->associnfo.bssvalid = 0;
358 if (mac->associnfo.associated) 358 if (mac->associnfo.associated)
359 schedule_work(&mac->associnfo.work); 359 schedule_delayed_work(&mac->associnfo.work, 0);
360 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { 360 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
361 /* the bssid we have is no longer fixed */ 361 /* the bssid we have is no longer fixed */
362 mac->associnfo.bssfixed = 0; 362 mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
373 /* tell the other code that this bssid should be used no matter what */ 373 /* tell the other code that this bssid should be used no matter what */
374 mac->associnfo.bssfixed = 1; 374 mac->associnfo.bssfixed = 1;
375 /* queue associate if new bssid or (old one again and not associated) */ 375 /* queue associate if new bssid or (old one again and not associated) */
376 schedule_work(&mac->associnfo.work); 376 schedule_delayed_work(&mac->associnfo.work, 0);
377 } 377 }
378 378
379 out: 379 out:
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd805344c61..8c74f9168b7d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman);
197 197
198extern void twkill_slots_invalid(void); 198extern void twkill_slots_invalid(void);
199 199
200void inet_twdr_twkill_work(void *data) 200void inet_twdr_twkill_work(struct work_struct *work)
201{ 201{
202 struct inet_timewait_death_row *twdr = data; 202 struct inet_timewait_death_row *twdr =
203 container_of(work, struct inet_timewait_death_row, twkill_work);
203 int i; 204 int i;
204 205
205 if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) 206 if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616e4602..9b933381ebbe 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@ static void update_defense_level(void)
221 * Timer for checking the defense 221 * Timer for checking the defense
222 */ 222 */
223#define DEFENSE_TIMER_PERIOD 1*HZ 223#define DEFENSE_TIMER_PERIOD 1*HZ
224static void defense_work_handler(void *data); 224static void defense_work_handler(struct work_struct *work);
225static DECLARE_WORK(defense_work, defense_work_handler, NULL); 225static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
226 226
227static void defense_work_handler(void *data) 227static void defense_work_handler(struct work_struct *work)
228{ 228{
229 update_defense_level(); 229 update_defense_level();
230 if (atomic_read(&ip_vs_dropentry)) 230 if (atomic_read(&ip_vs_dropentry))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6dddf59c1fb9..4a3889dd1943 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = {
45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
46 (unsigned long)&tcp_death_row), 46 (unsigned long)&tcp_death_row),
47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, 47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
48 inet_twdr_twkill_work, 48 inet_twdr_twkill_work),
49 &tcp_death_row),
50/* Short-time timewait calendar */ 49/* Short-time timewait calendar */
51 50
52 .twcal_hand = -1, 51 .twcal_hand = -1,
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a02030ad7..262bda808d96 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty);
61static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); 61static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
62static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); 62static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
63static void ircomm_tty_hangup(struct tty_struct *tty); 63static void ircomm_tty_hangup(struct tty_struct *tty);
64static void ircomm_tty_do_softint(void *private_); 64static void ircomm_tty_do_softint(struct work_struct *work);
65static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); 65static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
66static void ircomm_tty_stop(struct tty_struct *tty); 66static void ircomm_tty_stop(struct tty_struct *tty);
67 67
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
389 self->flow = FLOW_STOP; 389 self->flow = FLOW_STOP;
390 390
391 self->line = line; 391 self->line = line;
392 INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self); 392 INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
393 self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; 393 self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
394 self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; 394 self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
395 self->close_delay = 5*HZ/10; 395 self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
594} 594}
595 595
596/* 596/*
597 * Function ircomm_tty_do_softint (private_) 597 * Function ircomm_tty_do_softint (work)
598 * 598 *
599 * We use this routine to give the write wakeup to the user at at a 599 * We use this routine to give the write wakeup to the user at at a
600 * safe time (as fast as possible after write have completed). This 600 * safe time (as fast as possible after write have completed). This
601 * can be compared to the Tx interrupt. 601 * can be compared to the Tx interrupt.
602 */ 602 */
603static void ircomm_tty_do_softint(void *private_) 603static void ircomm_tty_do_softint(struct work_struct *work)
604{ 604{
605 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_; 605 struct ircomm_tty_cb *self =
606 container_of(work, struct ircomm_tty_cb, tqueue);
606 struct tty_struct *tty; 607 struct tty_struct *tty;
607 unsigned long flags; 608 unsigned long flags;
608 struct sk_buff *skb, *ctrl_skb; 609 struct sk_buff *skb, *ctrl_skb;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39471d3b31b9..ad0057db0f91 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
61#include <net/sctp/sm.h> 61#include <net/sctp/sm.h>
62 62
63/* Forward declarations for internal functions. */ 63/* Forward declarations for internal functions. */
64static void sctp_assoc_bh_rcv(struct sctp_association *asoc); 64static void sctp_assoc_bh_rcv(struct work_struct *work);
65 65
66 66
67/* 1st Level Abstractions. */ 67/* 1st Level Abstractions. */
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
269 269
270 /* Create an input queue. */ 270 /* Create an input queue. */
271 sctp_inq_init(&asoc->base.inqueue); 271 sctp_inq_init(&asoc->base.inqueue);
272 sctp_inq_set_th_handler(&asoc->base.inqueue, 272 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
273 (void (*)(void *))sctp_assoc_bh_rcv,
274 asoc);
275 273
276 /* Create an output queue. */ 274 /* Create an output queue. */
277 sctp_outq_init(asoc, &asoc->outqueue); 275 sctp_outq_init(asoc, &asoc->outqueue);
@@ -946,8 +944,11 @@ out:
946} 944}
947 945
948/* Do delayed input processing. This is scheduled by sctp_rcv(). */ 946/* Do delayed input processing. This is scheduled by sctp_rcv(). */
949static void sctp_assoc_bh_rcv(struct sctp_association *asoc) 947static void sctp_assoc_bh_rcv(struct work_struct *work)
950{ 948{
949 struct sctp_association *asoc =
950 container_of(work, struct sctp_association,
951 base.inqueue.immediate);
951 struct sctp_endpoint *ep; 952 struct sctp_endpoint *ep;
952 struct sctp_chunk *chunk; 953 struct sctp_chunk *chunk;
953 struct sock *sk; 954 struct sock *sk;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 33a42e90c32f..129756908da4 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
61#include <net/sctp/sm.h> 61#include <net/sctp/sm.h>
62 62
63/* Forward declarations for internal helpers. */ 63/* Forward declarations for internal helpers. */
64static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); 64static void sctp_endpoint_bh_rcv(struct work_struct *work);
65 65
66/* 66/*
67 * Initialize the base fields of the endpoint structure. 67 * Initialize the base fields of the endpoint structure.
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
89 sctp_inq_init(&ep->base.inqueue); 89 sctp_inq_init(&ep->base.inqueue);
90 90
91 /* Set its top-half handler */ 91 /* Set its top-half handler */
92 sctp_inq_set_th_handler(&ep->base.inqueue, 92 sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
93 (void (*)(void *))sctp_endpoint_bh_rcv, ep);
94 93
95 /* Initialize the bind addr area */ 94 /* Initialize the bind addr area */
96 sctp_bind_addr_init(&ep->base.bind_addr, 0); 95 sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
318/* Do delayed input processing. This is scheduled by sctp_rcv(). 317/* Do delayed input processing. This is scheduled by sctp_rcv().
319 * This may be called on BH or task time. 318 * This may be called on BH or task time.
320 */ 319 */
321static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) 320static void sctp_endpoint_bh_rcv(struct work_struct *work)
322{ 321{
322 struct sctp_endpoint *ep =
323 container_of(work, struct sctp_endpoint,
324 base.inqueue.immediate);
323 struct sctp_association *asoc; 325 struct sctp_association *asoc;
324 struct sock *sk; 326 struct sock *sk;
325 struct sctp_transport *transport; 327 struct sctp_transport *transport;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed7e849..71b07466e880 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue)
54 queue->in_progress = NULL; 54 queue->in_progress = NULL;
55 55
56 /* Create a task for delivering data. */ 56 /* Create a task for delivering data. */
57 INIT_WORK(&queue->immediate, NULL, NULL); 57 INIT_WORK(&queue->immediate, NULL);
58 58
59 queue->malloced = 0; 59 queue->malloced = 0;
60} 60}
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
97 * on the BH related data structures. 97 * on the BH related data structures.
98 */ 98 */
99 list_add_tail(&chunk->list, &q->in_chunk_list); 99 list_add_tail(&chunk->list, &q->in_chunk_list);
100 q->immediate.func(q->immediate.data); 100 q->immediate.func(&q->immediate);
101} 101}
102 102
103/* Extract a chunk from an SCTP inqueue. 103/* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
205 * The intent is that this routine will pull stuff out of the 205 * The intent is that this routine will pull stuff out of the
206 * inqueue and process it. 206 * inqueue and process it.
207 */ 207 */
208void sctp_inq_set_th_handler(struct sctp_inq *q, 208void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
209 void (*callback)(void *), void *arg)
210{ 209{
211 INIT_WORK(&q->immediate, callback, arg); 210 INIT_WORK(&q->immediate, callback);
212} 211}
213 212
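SCTP registers its top-half handler as a work_func_t and sometimes runs it synchronously by calling the stored func with the work item itself, which is the new calling convention in miniature. A sketch under those assumptions (the helper names are hypothetical):

#include <linux/workqueue.h>

struct my_inq {
	struct work_struct immediate;
};

static void my_inq_set_handler(struct my_inq *q, work_func_t callback)
{
	INIT_WORK(&q->immediate, callback);
}

static void my_inq_push(struct my_inq *q)
{
	/* ... queue the chunk, then run the handler in this context ... */
	q->immediate.func(&q->immediate);
}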
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece03..d96fd466a9a4 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations;
284static struct file_operations content_file_operations; 284static struct file_operations content_file_operations;
285static struct file_operations cache_flush_operations; 285static struct file_operations cache_flush_operations;
286 286
287static void do_cache_clean(void *data); 287static void do_cache_clean(struct work_struct *work);
288static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL); 288static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
289 289
290void cache_register(struct cache_detail *cd) 290void cache_register(struct cache_detail *cd)
291{ 291{
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@ static int cache_clean(void)
 /*
  * We want to regularly clean the cache, so we need to schedule some work ...
  */
-static void do_cache_clean(void *data)
+static void do_cache_clean(struct work_struct *work)
 {
 	int delay = 5;
 	if (cache_clean() == -1)
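Editor's note: the cache cleaner becomes a delayed_work here because the handler re-arms itself with a timeout, and an immediate first run is then expressed as a zero delay. A minimal sketch of a self-rearming cleaner along the same lines, with hypothetical names:

	#include <linux/workqueue.h>

	static void my_clean(struct work_struct *work);
	static DECLARE_DELAYED_WORK(my_cleaner, my_clean);

	static void my_clean(struct work_struct *work)
	{
		/* ... do one pass of cleaning ... */

		/* re-arm, as do_cache_clean() does with its computed delay */
		schedule_delayed_work(&my_cleaner, 30 * HZ);
	}

	static void my_clean_start(void)
	{
		/* first pass as soon as possible, like cache_register() */
		schedule_delayed_work(&my_cleaner, 0);
	}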
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f90..49dba5febbbd 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
 }
 
 static void
-rpc_timeout_upcall_queue(void *data)
+rpc_timeout_upcall_queue(struct work_struct *work)
 {
 	LIST_HEAD(free_list);
-	struct rpc_inode *rpci = (struct rpc_inode *)data;
+	struct rpc_inode *rpci =
+		container_of(work, struct rpc_inode, queue_timeout.work);
 	struct inode *inode = &rpci->vfs_inode;
 	void (*destroy_msg)(struct rpc_pipe_msg *);
 
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				  rpc_timeout_upcall_queue);
 		rpci->ops = NULL;
 	}
 }
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a1ab4eed41f4..eff44bcdc95a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -41,7 +41,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
 
 static void __rpc_default_timer(struct rpc_task *task);
 static void rpciod_killall(void);
-static void rpc_async_schedule(void *);
+static void rpc_async_schedule(struct work_struct *);
 
 /*
  * RPC tasks sit here while waiting for conditions to improve.
@@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task)
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
-		INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 		status = queue_work(task->tk_workqueue, &task->u.tk_work);
 		if (status < 0) {
 			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task)
 	return __rpc_execute(task);
 }
 
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
 {
-	__rpc_execute((struct rpc_task *)arg);
+	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 }
 
 /**
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 80857470dc11..4f9a5d9791fb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -479,9 +479,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
 	return status;
 }
 
-static void xprt_autoclose(void *args)
+static void xprt_autoclose(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, task_cleanup);
 
 	xprt_disconnect(xprt);
 	xprt->ops->close(xprt);
@@ -932,7 +933,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
-	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
+	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
 	init_timer(&xprt->timer);
 	xprt->timer.function = xprt_init_autodisconnect;
 	xprt->timer.data = (unsigned long) xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91ef25d..cfe3c15be948 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1060,13 +1060,14 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
 
 /**
  * xs_udp_connect_worker - set up a UDP socket
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_udp_connect_worker(void *args)
+static void xs_udp_connect_worker(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, connect_worker.work);
 	struct socket *sock = xprt->sock;
 	int err, status = -EIO;
 
@@ -1144,13 +1145,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
 
 /**
  * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_tcp_connect_worker(void *args)
+static void xs_tcp_connect_worker(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, connect_worker.work);
 	struct socket *sock = xprt->sock;
 	int err, status = -EIO;
 
@@ -1262,7 +1264,7 @@ static void xs_connect(struct rpc_task *task)
 		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&xprt->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1377,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1422,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 64d3938f74c4..f6c77bd36fdd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
 	xfrm_pol_put(policy);
 }
 
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
 {
 	struct xfrm_policy *policy;
 	struct hlist_node *entry, *tmp;
@@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total)
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
 	int dir, total;
 
@@ -597,7 +597,7 @@ static void xfrm_hash_resize(void *__unused)
 	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 /* Generate new index... KAME seems to generate them ordered by cost
  * of an absolute inpredictability of ordering of rules. This will not pass. */
@@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void)
 		panic("XFRM: failed to allocate bydst hash\n");
 	}
 
-	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
 	register_netdevice_notifier(&xfrm_dev_notifier);
 }
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 864962bbda90..da54a64ccfa3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void)
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
 	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
 	unsigned long nsize, osize;
@@ -168,7 +168,7 @@ out_unlock:
 	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
 EXPORT_SYMBOL(km_waitq);
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
 	kfree(x);
 }
 
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
 {
 	struct xfrm_state *x;
 	struct hlist_node *entry, *tmp;
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void)
 		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
 	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
-	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
 }
 
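Editor's note: the xfrm hunks are the degenerate case. Handlers that never looked at their data pointer only change signature, and the static DECLARE_WORK() simply drops its trailing NULL. A sketch with hypothetical names:

	#include <linux/workqueue.h>

	static void my_resize(struct work_struct *unused)
	{
		/* the handler needs no per-item context */
	}

	static DECLARE_WORK(my_resize_work, my_resize);

	static void my_resize_kick(void)
	{
		schedule_work(&my_resize_work);
	}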
diff --git a/security/keys/key.c b/security/keys/key.c
index 80de8c3e9cc3..70eacbe5abde 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -30,8 +30,8 @@ DEFINE_SPINLOCK(key_user_lock);
 static LIST_HEAD(key_types_list);
 static DECLARE_RWSEM(key_types_sem);
 
-static void key_cleanup(void *data);
-static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
+static void key_cleanup(struct work_struct *work);
+static DECLARE_WORK(key_cleanup_task, key_cleanup);
 
 /* we serialise key instantiation and link */
 DECLARE_RWSEM(key_construction_sem);
@@ -552,7 +552,7 @@ EXPORT_SYMBOL(key_negate_and_link);
  * do cleaning up in process context so that we don't have to disable
  * interrupts all over the place
  */
-static void key_cleanup(void *data)
+static void key_cleanup(struct work_struct *work)
 {
 	struct rb_node *_n;
 	struct key *key;
diff --git a/sound/aoa/aoa-gpio.h b/sound/aoa/aoa-gpio.h
index 3a61f3115573..ee64f5de8966 100644
--- a/sound/aoa/aoa-gpio.h
+++ b/sound/aoa/aoa-gpio.h
@@ -59,10 +59,10 @@ struct gpio_methods {
 };
 
 struct gpio_notification {
+	struct delayed_work work;
 	notify_func_t notify;
 	void *data;
 	void *gpio_private;
-	struct work_struct work;
 	struct mutex mutex;
 };
 
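Editor's note: struct delayed_work wraps a work_struct in its .work member, so when the gpio_notification member becomes a delayed_work the handlers in the two gpio files below have to step through that extra member in container_of(). A minimal sketch with a hypothetical struct bar:

	#include <linux/workqueue.h>

	struct bar {
		struct delayed_work dwork;	/* delayed_work embeds a work_struct in .work */
	};

	static void bar_work_fn(struct work_struct *work)
	{
		/* note the two-level member path through the delayed_work */
		struct bar *b = container_of(work, struct bar, dwork.work);

		(void)b;
	}

	static void bar_init(struct bar *b)
	{
		INIT_DELAYED_WORK(&b->dwork, bar_work_fn);
		schedule_delayed_work(&b->dwork, 0);	/* run as soon as possible */
	}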
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c
index 40eb47eccf9a..2b03bc798bcb 100644
--- a/sound/aoa/core/snd-aoa-gpio-feature.c
+++ b/sound/aoa/core/snd-aoa-gpio-feature.c
@@ -195,9 +195,10 @@ static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt)
 	ftr_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void ftr_handle_notify(void *data)
+static void ftr_handle_notify(struct work_struct *work)
 {
-	struct gpio_notification *notif = data;
+	struct gpio_notification *notif =
+		container_of(work, struct gpio_notification, work.work);
 
 	mutex_lock(&notif->mutex);
 	if (notif->notify)
@@ -253,12 +254,9 @@ static void ftr_gpio_init(struct gpio_runtime *rt)
 
 	ftr_gpio_all_amps_off(rt);
 	rt->implementation_private = 0;
-	INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify,
-		  &rt->headphone_notify);
-	INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify,
-		  &rt->line_in_notify);
-	INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify,
-		  &rt->line_out_notify);
+	INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
 	mutex_init(&rt->headphone_notify.mutex);
 	mutex_init(&rt->line_in_notify.mutex);
 	mutex_init(&rt->line_out_notify.mutex);
@@ -287,7 +285,7 @@ static irqreturn_t ftr_handle_notify_irq(int xx, void *data)
 {
 	struct gpio_notification *notif = data;
 
-	schedule_work(&notif->work);
+	schedule_delayed_work(&notif->work, 0);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/aoa/core/snd-aoa-gpio-pmf.c b/sound/aoa/core/snd-aoa-gpio-pmf.c
index 2836c3218391..5ca2220eac7d 100644
--- a/sound/aoa/core/snd-aoa-gpio-pmf.c
+++ b/sound/aoa/core/snd-aoa-gpio-pmf.c
@@ -69,9 +69,10 @@ static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt)
 	pmf_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void pmf_handle_notify(void *data)
+static void pmf_handle_notify(struct work_struct *work)
 {
-	struct gpio_notification *notif = data;
+	struct gpio_notification *notif =
+		container_of(work, struct gpio_notification, work.work);
 
 	mutex_lock(&notif->mutex);
 	if (notif->notify)
@@ -83,12 +84,9 @@ static void pmf_gpio_init(struct gpio_runtime *rt)
 {
 	pmf_gpio_all_amps_off(rt);
 	rt->implementation_private = 0;
-	INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify,
-		  &rt->headphone_notify);
-	INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify,
-		  &rt->line_in_notify);
-	INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify,
-		  &rt->line_out_notify);
+	INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
 	mutex_init(&rt->headphone_notify.mutex);
 	mutex_init(&rt->line_in_notify.mutex);
 	mutex_init(&rt->line_out_notify.mutex);
@@ -129,7 +127,7 @@ static void pmf_handle_notify_irq(void *data)
 {
 	struct gpio_notification *notif = data;
 
-	schedule_work(&notif->work);
+	schedule_delayed_work(&notif->work, 0);
 }
 
 static int pmf_set_notify(struct gpio_runtime *rt,
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 12ffffc9e814..d2f2c5078e65 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
 
 #define AK4114_ADDR 0x00 /* fixed address */
 
-static void ak4114_stats(void *);
+static void ak4114_stats(struct work_struct *work);
 
 static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val)
 {
@@ -158,7 +158,7 @@ void snd_ak4114_reinit(struct ak4114 *chip)
 	reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN);
 	/* bring up statistics / event queing */
 	chip->init = 0;
-	INIT_WORK(&chip->work, ak4114_stats, chip);
+	INIT_DELAYED_WORK(&chip->work, ak4114_stats);
 	queue_delayed_work(chip->workqueue, &chip->work, HZ / 10);
 }
 
@@ -561,9 +561,9 @@ int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags)
 		return res;
 }
 
-static void ak4114_stats(void *data)
+static void ak4114_stats(struct work_struct *work)
 {
-	struct ak4114 *chip = (struct ak4114 *)data;
+	struct ak4114 *chip = container_of(work, struct ak4114, work.work);
 
 	if (chip->init)
 		return;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6577b2325357..7abcb10b2754 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1927,9 +1927,10 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
 static struct snd_ac97_build_ops null_build_ops;
 
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-static void do_update_power(void *data)
+static void do_update_power(struct work_struct *work)
 {
-	update_power_regs(data);
+	update_power_regs(
+		container_of(work, struct snd_ac97, power_work.work));
 }
 #endif
 
@@ -1989,7 +1990,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
 	mutex_init(&ac97->page_mutex);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
 	ac97->power_workq = create_workqueue("ac97");
-	INIT_WORK(&ac97->power_work, do_update_power, ac97);
+	INIT_DELAYED_WORK(&ac97->power_work, do_update_power);
 #endif
 
 #ifdef CONFIG_PCI
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9c3d7ac08068..71482c15a852 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -272,10 +272,11 @@ EXPORT_SYMBOL(snd_hda_queue_unsol_event);
 /*
  * process queueud unsolicited events
  */
-static void process_unsol_events(void *data)
+static void process_unsol_events(struct work_struct *work)
 {
-	struct hda_bus *bus = data;
-	struct hda_bus_unsolicited *unsol = bus->unsol;
+	struct hda_bus_unsolicited *unsol =
+		container_of(work, struct hda_bus_unsolicited, work);
+	struct hda_bus *bus = unsol->bus;
 	struct hda_codec *codec;
 	unsigned int rp, caddr, res;
 
@@ -314,7 +315,8 @@ static int init_unsol_queue(struct hda_bus *bus)
 		kfree(unsol);
 		return -ENOMEM;
 	}
-	INIT_WORK(&unsol->work, process_unsol_events, bus);
+	INIT_WORK(&unsol->work, process_unsol_events);
+	unsol->bus = bus;
 	bus->unsol = unsol;
 	return 0;
 }
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index f9416c36396e..9ca1baf860bd 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -206,6 +206,7 @@ struct hda_bus_unsolicited {
 	/* workqueue */
 	struct workqueue_struct *workq;
 	struct work_struct work;
+	struct hda_bus *bus;
 };
 
 /*
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 2fbe1d183fce..8f074c7936e6 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -942,10 +942,11 @@ static void check_mute(struct snd_pmac *chip, struct pmac_gpio *gp, int val, int
 }
 
 static struct work_struct device_change;
+static struct snd_pmac *device_change_chip;
 
-static void device_change_handler(void *self)
+static void device_change_handler(struct work_struct *work)
 {
-	struct snd_pmac *chip = self;
+	struct snd_pmac *chip = device_change_chip;
 	struct pmac_tumbler *mix;
 	int headphone, lineout;
 
@@ -1417,7 +1418,8 @@ int __init snd_pmac_tumbler_init(struct snd_pmac *chip)
 	chip->resume = tumbler_resume;
 #endif
 
-	INIT_WORK(&device_change, device_change_handler, (void *)chip);
+	INIT_WORK(&device_change, device_change_handler);
+	device_change_chip = chip;
 
 #ifdef PMAC_SUPPORT_AUTOMUTE
 	if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)
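Editor's note: where the work item is not embedded in the object it operates on, container_of() cannot help, so the conversion stashes the pointer elsewhere. tumbler.c keeps it in a file-scope variable beside the work item, while hda_local.h instead grows a bus back-pointer inside the containing struct. A rough sketch of the file-scope variant, with hypothetical names:

	#include <linux/workqueue.h>

	struct baz;				/* the object the handler needs */

	static struct work_struct baz_change;	/* stand-alone work item */
	static struct baz *baz_change_obj;	/* companion pointer, set before scheduling */

	static void baz_change_handler(struct work_struct *work)
	{
		struct baz *obj = baz_change_obj;	/* no container_of() possible here */

		(void)obj;
	}

	static void baz_setup(struct baz *obj)
	{
		INIT_WORK(&baz_change, baz_change_handler);
		baz_change_obj = obj;
		schedule_work(&baz_change);
	}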