-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/scsi/scsi_mid_low_api.txt31
-rw-r--r--arch/arm/common/sharpsl_pm.c22
-rw-r--r--arch/arm/mach-omap1/board-h3.c3
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c6
-rw-r--r--arch/arm/mach-omap1/leds-osk.c4
-rw-r--r--arch/arm/mach-omap2/board-h4.c3
-rw-r--r--arch/arm/mach-pxa/akita-ioexp.c6
-rw-r--r--arch/i386/kernel/cpu/mcheck/non-fatal.c6
-rw-r--r--arch/i386/kernel/smpboot.c11
-rw-r--r--arch/i386/kernel/tsc.c4
-rw-r--r--arch/ia64/hp/sim/simserial.c4
-rw-r--r--arch/ia64/kernel/mca.c8
-rw-r--r--arch/ia64/kernel/smpboot.c12
-rw-r--r--arch/m68knommu/platform/5307/timers.c16
-rw-r--r--arch/m68knommu/platform/68360/config.c2
-rw-r--r--arch/mips/kernel/kspd.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/ls_uart.c4
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c12
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c6
-rw-r--r--arch/ppc/8260_io/fcc_enet.c21
-rw-r--r--arch/ppc/8xx_io/fec.c21
-rw-r--r--arch/s390/appldata/appldata_base.c6
-rw-r--r--arch/um/drivers/chan_kern.c2
-rw-r--r--arch/um/drivers/mconsole_kern.c4
-rw-r--r--arch/um/drivers/net_kern.c1
-rw-r--r--arch/um/drivers/port_kern.c4
-rw-r--r--arch/x86_64/kernel/mce.c6
-rw-r--r--arch/x86_64/kernel/smpboot.c12
-rw-r--r--arch/x86_64/kernel/time.c4
-rw-r--r--block/as-iosched.c7
-rw-r--r--block/cfq-iosched.c8
-rw-r--r--block/ll_rw_blk.c8
-rw-r--r--block/scsi_ioctl.c2
-rw-r--r--crypto/cryptomgr.c7
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/ata/libata-core.c25
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/ata/libata-scsi.c14
-rw-r--r--drivers/ata/libata.h4
-rw-r--r--drivers/ata/pata_pcmcia.c15
-rw-r--r--drivers/atm/idt77252.c9
-rw-r--r--drivers/block/aoe/aoe.h2
-rw-r--r--drivers/block/aoe/aoecmd.c4
-rw-r--r--drivers/block/aoe/aoedev.c2
-rw-r--r--drivers/block/floppy.c10
-rw-r--r--drivers/block/paride/pd.c8
-rw-r--r--drivers/block/paride/pseudo.h10
-rw-r--r--drivers/block/sx8.c7
-rw-r--r--drivers/block/ub.c8
-rw-r--r--drivers/bluetooth/bcm203x.c7
-rw-r--r--drivers/bluetooth/bluecard_cs.c38
-rw-r--r--drivers/bluetooth/bt3c_cs.c20
-rw-r--r--drivers/bluetooth/btuart_cs.c20
-rw-r--r--drivers/bluetooth/dtl1_cs.c20
-rw-r--r--drivers/char/cyclades.c9
-rw-r--r--drivers/char/drm/via_dmablit.c6
-rw-r--r--drivers/char/epca.c8
-rw-r--r--drivers/char/esp.c14
-rw-r--r--drivers/char/genrtc.c4
-rw-r--r--drivers/char/hvsi.c16
-rw-r--r--drivers/char/ip2/i2lib.c12
-rw-r--r--drivers/char/ip2/ip2main.c23
-rw-r--r--drivers/char/isicom.c12
-rw-r--r--drivers/char/moxa.c8
-rw-r--r--drivers/char/mxser.c9
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c26
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c26
-rw-r--r--drivers/char/pcmcia/synclink_cs.c15
-rw-r--r--drivers/char/random.c6
-rw-r--r--drivers/char/sonypi.c4
-rw-r--r--drivers/char/specialix.c14
-rw-r--r--drivers/char/synclink.c9
-rw-r--r--drivers/char/synclink_gt.c10
-rw-r--r--drivers/char/synclinkmp.c8
-rw-r--r--drivers/char/sysrq.c4
-rw-r--r--drivers/char/tpm/tpm.c6
-rw-r--r--drivers/char/tty_io.c31
-rw-r--r--drivers/char/vt.c6
-rw-r--r--drivers/connector/cn_queue.c8
-rw-r--r--drivers/connector/connector.c31
-rw-r--r--drivers/cpufreq/cpufreq.c10
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c7
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c28
-rw-r--r--drivers/i2c/chips/ds1374.c12
-rw-r--r--drivers/ide/legacy/ide-cs.c20
-rw-r--r--drivers/ieee1394/hosts.c9
-rw-r--r--drivers/ieee1394/hosts.h2
-rw-r--r--drivers/ieee1394/sbp2.c28
-rw-r--r--drivers/ieee1394/sbp2.h2
-rw-r--r--drivers/infiniband/core/addr.c6
-rw-r--r--drivers/infiniband/core/cache.c7
-rw-r--r--drivers/infiniband/core/cm.c19
-rw-r--r--drivers/infiniband/core/cma.c10
-rw-r--r--drivers/infiniband/core/iwcm.c6
-rw-r--r--drivers/infiniband/core/mad.c25
-rw-r--r--drivers/infiniband/core/mad_priv.h2
-rw-r--r--drivers/infiniband/core/mad_rmpp.c18
-rw-r--r--drivers/infiniband/core/sa_query.c10
-rw-r--r--drivers/infiniband/core/uverbs_mem.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c22
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c7
-rw-r--r--drivers/input/keyboard/atkbd.c6
-rw-r--r--drivers/input/keyboard/lkkbd.c6
-rw-r--r--drivers/input/keyboard/sunkbd.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c7
-rw-r--r--drivers/input/serio/libps2.c6
-rw-r--r--drivers/isdn/act2000/capi.c4
-rw-r--r--drivers/isdn/act2000/capi.h2
-rw-r--r--drivers/isdn/act2000/module.c18
-rw-r--r--drivers/isdn/capi/kcapi.c14
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c36
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c7
-rw-r--r--drivers/isdn/hisax/avma1_cs.c36
-rw-r--r--drivers/isdn/hisax/config.c9
-rw-r--r--drivers/isdn/hisax/elsa_cs.c17
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c5
-rw-r--r--drivers/isdn/hisax/hfc_2bds0.c9
-rw-r--r--drivers/isdn/hisax/hfc_pci.c6
-rw-r--r--drivers/isdn/hisax/hfc_sx.c6
-rw-r--r--drivers/isdn/hisax/icc.c6
-rw-r--r--drivers/isdn/hisax/isac.c6
-rw-r--r--drivers/isdn/hisax/isar.c6
-rw-r--r--drivers/isdn/hisax/isdnl1.c6
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c10
-rw-r--r--drivers/isdn/hisax/teles_cs.c17
-rw-r--r--drivers/isdn/hisax/w6692.c6
-rw-r--r--drivers/isdn/i4l/isdn_net.c6
-rw-r--r--drivers/isdn/pcbit/drv.c4
-rw-r--r--drivers/isdn/pcbit/layer2.c6
-rw-r--r--drivers/isdn/pcbit/pcbit.h2
-rw-r--r--drivers/macintosh/rack-meter.c16
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/md/dm-crypt.c8
-rw-r--r--drivers/md/dm-mpath.c18
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/dm-snap.c9
-rw-r--r--drivers/md/kcopyd.c4
-rw-r--r--drivers/media/dvb/b2c2/flexcop-pci.c9
-rw-r--r--drivers/media/dvb/cinergyT2/cinergyT2.c18
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c19
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-remote.c7
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/video/cpia_pp.c20
-rw-r--r--drivers/media/video/cx88/cx88-input.c6
-rw-r--r--drivers/media/video/ir-kbd-i2c.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.c13
-rw-r--r--drivers/media/video/saa6588.c6
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c9
-rw-r--r--drivers/message/fusion/mptfc.c14
-rw-r--r--drivers/message/fusion/mptlan.c29
-rw-r--r--drivers/message/fusion/mptsas.c25
-rw-r--r--drivers/message/fusion/mptspi.c14
-rw-r--r--drivers/message/i2o/driver.c2
-rw-r--r--drivers/message/i2o/exec-osm.c13
-rw-r--r--drivers/message/i2o/i2o_block.c15
-rw-r--r--drivers/message/i2o/i2o_block.h2
-rw-r--r--drivers/misc/tifm_7xx1.c18
-rw-r--r--drivers/mmc/mmc.c14
-rw-r--r--drivers/mmc/mmc.h2
-rw-r--r--drivers/mmc/mmc_sysfs.c10
-rw-r--r--drivers/mmc/tifm_sd.c28
-rw-r--r--drivers/net/8139too.c26
-rw-r--r--drivers/net/bnx2.c6
-rw-r--r--drivers/net/cassini.c6
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/cphy.h2
-rw-r--r--drivers/net/chelsio/cxgb2.c16
-rw-r--r--drivers/net/chelsio/my3126.c8
-rw-r--r--drivers/net/e100.c8
-rw-r--r--drivers/net/e1000/e1000_main.c10
-rw-r--r--drivers/net/ehea/ehea_main.c9
-rw-r--r--drivers/net/hamradio/baycom_epp.c14
-rw-r--r--drivers/net/irda/mcs7780.c6
-rw-r--r--drivers/net/irda/sir-dev.h2
-rw-r--r--drivers/net/irda/sir_dev.c8
-rw-r--r--drivers/net/iseries_veth.c12
-rw-r--r--drivers/net/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/netxen/netxen_nic.h3
-rw-r--r--drivers/net/netxen/netxen_nic_init.c5
-rw-r--r--drivers/net/netxen/netxen_nic_main.c19
-rw-r--r--drivers/net/ns83820.c10
-rw-r--r--drivers/net/pcmcia/3c574_cs.c25
-rw-r--r--drivers/net/pcmcia/3c589_cs.c19
-rw-r--r--drivers/net/pcmcia/axnet_cs.c6
-rw-r--r--drivers/net/pcmcia/com20020_cs.c13
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c40
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c12
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c12
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c30
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c59
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c40
-rw-r--r--drivers/net/phy/phy.c9
-rw-r--r--drivers/net/plip.c38
-rw-r--r--drivers/net/qla3xxx.c20
-rw-r--r--drivers/net/qla3xxx.h4
-rw-r--r--drivers/net/r8169.c23
-rw-r--r--drivers/net/s2io.c16
-rw-r--r--drivers/net/s2io.h2
-rw-r--r--drivers/net/sis190.c13
-rw-r--r--drivers/net/skge.c15
-rw-r--r--drivers/net/skge.h2
-rw-r--r--drivers/net/smc91x.c15
-rw-r--r--drivers/net/spider_net.c9
-rw-r--r--drivers/net/sungem.c6
-rw-r--r--drivers/net/tg3.c6
-rw-r--r--drivers/net/tlan.c23
-rw-r--r--drivers/net/tlan.h1
-rw-r--r--drivers/net/tulip/21142.c7
-rw-r--r--drivers/net/tulip/timer.c7
-rw-r--r--drivers/net/tulip/tulip.h7
-rw-r--r--drivers/net/tulip/tulip_core.c3
-rw-r--r--drivers/net/wan/pc300_tty.c23
-rw-r--r--drivers/net/wireless/airo_cs.c19
-rw-r--r--drivers/net/wireless/atmel_cs.c11
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx.h2
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c20
-rw-r--r--drivers/net/wireless/hostap/hostap.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c19
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c13
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c21
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c8
-rw-r--r--drivers/net/wireless/ipw2100.c47
-rw-r--r--drivers/net/wireless/ipw2100.h10
-rw-r--r--drivers/net/wireless/ipw2200.c227
-rw-r--r--drivers/net/wireless/ipw2200.h16
-rw-r--r--drivers/net/wireless/netwave_cs.c18
-rw-r--r--drivers/net/wireless/orinoco.c28
-rw-r--r--drivers/net/wireless/orinoco_cs.c19
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c8
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h4
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c5
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c4
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c2
-rw-r--r--drivers/net/wireless/ray_cs.c30
-rw-r--r--drivers/net/wireless/spectrum_cs.c19
-rw-r--r--drivers/net/wireless/wavelan_cs.c33
-rw-r--r--drivers/net/wireless/wl3501_cs.c15
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c30
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h6
-rw-r--r--drivers/oprofile/cpu_buffer.c9
-rw-r--r--drivers/oprofile/cpu_buffer.h2
-rw-r--r--drivers/parport/parport_cs.c9
-rw-r--r--drivers/pci/hotplug/shpchp.h4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c2
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c19
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c8
-rw-r--r--drivers/pcmcia/at91_cf.c69
-rw-r--r--drivers/pcmcia/cs_internal.h2
-rw-r--r--drivers/pcmcia/ds.c274
-rw-r--r--drivers/pcmcia/m32r_cfc.c2
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c7
-rw-r--r--drivers/pcmcia/pd6729.c8
-rw-r--r--drivers/pcmcia/socket_sysfs.c4
-rw-r--r--drivers/rtc/rtc-dev.c7
-rw-r--r--drivers/scsi/53c700.c7
-rw-r--r--drivers/scsi/BusLogic.c12
-rw-r--r--drivers/scsi/Kconfig59
-rw-r--r--drivers/scsi/Makefile7
-rw-r--r--drivers/scsi/NCR5380.c11
-rw-r--r--drivers/scsi/NCR5380.h4
-rw-r--r--drivers/scsi/NCR53c406a.c5
-rw-r--r--drivers/scsi/aacraid/aacraid.h4
-rw-r--r--drivers/scsi/aacraid/commsup.c23
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1740.c10
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c8
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.h1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c9
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c121
-rw-r--r--drivers/scsi/fd_mcs.c2
-rw-r--r--drivers/scsi/hosts.c8
-rw-r--r--drivers/scsi/ibmvscsi/Makefile2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c958
-rw-r--r--drivers/scsi/imm.c12
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c322
-rw-r--r--drivers/scsi/ipr.h83
-rw-r--r--drivers/scsi/ips.c28
-rw-r--r--drivers/scsi/ips.h9
-rw-r--r--drivers/scsi/libiscsi.c7
-rw-r--r--drivers/scsi/libsas/sas_discover.c22
-rw-r--r--drivers/scsi/libsas/sas_event.c14
-rw-r--r--drivers/scsi/libsas/sas_expander.c36
-rw-r--r--drivers/scsi/libsas/sas_init.c10
-rw-r--r--drivers/scsi/libsas/sas_internal.h12
-rw-r--r--drivers/scsi/libsas/sas_phy.c45
-rw-r--r--drivers/scsi/libsas/sas_port.c30
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c92
-rw-r--r--drivers/scsi/libsrp.c441
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c118
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c229
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h35
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c136
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c42
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c13
-rw-r--r--drivers/scsi/megaraid.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c4
-rw-r--r--drivers/scsi/ncr53c8xx.c19
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c7
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c5
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c6
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c11
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c12
-rw-r--r--drivers/scsi/ppa.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c8
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h105
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h7
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c47
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c70
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c124
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h7
-rw-r--r--drivers/scsi/scsi.c45
-rw-r--r--drivers/scsi/scsi_error.c33
-rw-r--r--drivers/scsi/scsi_lib.c346
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_scan.c232
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_tgt_if.c352
-rw-r--r--drivers/scsi/scsi_tgt_lib.c745
-rw-r--r--drivers/scsi/scsi_tgt_priv.h25
-rw-r--r--drivers/scsi/scsi_transport_fc.c60
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/scsi_transport_spi.c7
-rw-r--r--drivers/scsi/scsi_wait_scan.c31
-rw-r--r--drivers/scsi/sd.c29
-rw-r--r--drivers/scsi/st.c16
-rw-r--r--drivers/scsi/stex.c130
-rw-r--r--drivers/scsi/t128.h39
-rw-r--r--drivers/serial/mcfserial.c54
-rw-r--r--drivers/serial/serial_cs.c67
-rw-r--r--drivers/spi/pxa2xx_spi.c9
-rw-r--r--drivers/spi/spi_bitbang.c7
-rw-r--r--drivers/telephony/ixj_pcmcia.c37
-rw-r--r--drivers/usb/atm/cxacru.c12
-rw-r--r--drivers/usb/atm/speedtch.c15
-rw-r--r--drivers/usb/atm/ueagle-atm.c6
-rw-r--r--drivers/usb/class/cdc-acm.c6
-rw-r--r--drivers/usb/core/hub.c20
-rw-r--r--drivers/usb/core/message.c7
-rw-r--r--drivers/usb/core/usb.c9
-rw-r--r--drivers/usb/gadget/ether.c6
-rw-r--r--drivers/usb/host/sl811_cs.c15
-rw-r--r--drivers/usb/host/u132-hcd.c62
-rw-r--r--drivers/usb/input/hid-core.c7
-rw-r--r--drivers/usb/misc/appledisplay.c11
-rw-r--r--drivers/usb/misc/ftdi-elan.c86
-rw-r--r--drivers/usb/misc/phidgetkit.c21
-rw-r--r--drivers/usb/misc/phidgetmotorcontrol.c11
-rw-r--r--drivers/usb/net/kaweth.c9
-rw-r--r--drivers/usb/net/pegasus.c6
-rw-r--r--drivers/usb/net/pegasus.h2
-rw-r--r--drivers/usb/net/usbnet.c7
-rw-r--r--drivers/usb/serial/aircable.c13
-rw-r--r--drivers/usb/serial/digi_acceleport.c14
-rw-r--r--drivers/usb/serial/ftdi_sio.c19
-rw-r--r--drivers/usb/serial/keyspan_pda.c22
-rw-r--r--drivers/usb/serial/usb-serial.c7
-rw-r--r--drivers/usb/serial/whiteheat.c15
-rw-r--r--drivers/video/console/fbcon.c6
-rw-r--r--drivers/video/pxafb.c7
-rw-r--r--fs/9p/mux.c16
-rw-r--r--fs/aio.c16
-rw-r--r--fs/bio.c6
-rw-r--r--fs/file.c6
-rw-r--r--fs/gfs2/glock.c8
-rw-r--r--fs/ncpfs/inode.c8
-rw-r--r--fs/ncpfs/sock.c20
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/namespace.c8
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4renewd.c5
-rw-r--r--fs/nfsd/nfs4state.c7
-rw-r--r--fs/ocfs2/alloc.c9
-rw-r--r--fs/ocfs2/cluster/heartbeat.c10
-rw-r--r--fs/ocfs2/cluster/quorum.c4
-rw-r--r--fs/ocfs2/cluster/tcp.c78
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h8
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c5
-rw-r--r--fs/ocfs2/dlm/userdlm.c10
-rw-r--r--fs/ocfs2/journal.c7
-rw-r--r--fs/ocfs2/journal.h2
-rw-r--r--fs/ocfs2/ocfs2.h2
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/reiserfs/journal.c12
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c21
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c9
-rw-r--r--include/asm-arm/arch-omap/irda.h2
-rw-r--r--include/asm-m68knommu/irq.h1
-rw-r--r--include/asm-m68knommu/rtc.h1
-rw-r--r--include/asm-m68knommu/ucontext.h6
-rw-r--r--include/linux/aio.h2
-rw-r--r--include/linux/connector.h4
-rw-r--r--include/linux/i2o.h2
-rw-r--r--include/linux/kbd_kern.h2
-rw-r--r--include/linux/libata.h7
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/ncp_fs_sb.h8
-rw-r--r--include/linux/netpoll.h2
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/reiserfs_fs_sb.h3
-rw-r--r--include/linux/relay.h2
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h2
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/workqueue.h145
-rw-r--r--include/net/ieee80211softmac.h4
-rw-r--r--include/net/inet_timewait_sock.h2
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/pcmcia/ss.h5
-rw-r--r--include/scsi/libsas.h37
-rw-r--r--include/scsi/libsrp.h77
-rw-r--r--include/scsi/scsi_cmnd.h10
-rw-r--r--include/scsi/scsi_device.h30
-rw-r--r--include/scsi/scsi_host.h69
-rw-r--r--include/scsi/scsi_tgt.h19
-rw-r--r--include/scsi/scsi_tgt_if.h90
-rw-r--r--include/scsi/scsi_transport_fc.h4
-rw-r--r--include/scsi/scsi_transport_iscsi.h2
-rw-r--r--include/scsi/scsi_transport_sas.h2
-rw-r--r--include/sound/ac97_codec.h2
-rw-r--r--include/sound/ak4114.h2
-rw-r--r--ipc/util.c7
-rw-r--r--kernel/kmod.c16
-rw-r--r--kernel/kthread.c13
-rw-r--r--kernel/power/poweroff.c4
-rw-r--r--kernel/relay.c10
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/workqueue.c109
-rw-r--r--mm/nommu.c12
-rw-r--r--mm/slab.c12
-rw-r--r--mm/swap.c4
-rw-r--r--net/atm/lec.c9
-rw-r--r--net/atm/lec.h2
-rw-r--r--net/bluetooth/hci_sysfs.c12
-rw-r--r--net/bridge/br_if.c10
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/core/link_watch.c13
-rw-r--r--net/core/netpoll.c11
-rw-r--r--net/dccp/minisocks.c3
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_assoc.c18
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_auth.c23
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_event.c12
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_module.c4
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_priv.h13
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_scan.c13
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c6
-rw-r--r--net/ipv4/inet_timewait_sock.c5
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c6
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/irda/ircomm/ircomm_tty.c11
-rw-r--r--net/sctp/associola.c11
-rw-r--r--net/sctp/endpointola.c10
-rw-r--r--net/sctp/inqueue.c9
-rw-r--r--net/sunrpc/cache.c8
-rw-r--r--net/sunrpc/rpc_pipe.c8
-rw-r--r--net/sunrpc/sched.c8
-rw-r--r--net/sunrpc/xprt.c7
-rw-r--r--net/sunrpc/xprtsock.c20
-rw-r--r--net/xfrm/xfrm_policy.c8
-rw-r--r--net/xfrm/xfrm_state.c8
-rw-r--r--security/keys/key.c6
-rw-r--r--sound/aoa/aoa-gpio.h2
-rw-r--r--sound/aoa/core/snd-aoa-gpio-feature.c16
-rw-r--r--sound/aoa/core/snd-aoa-gpio-pmf.c16
-rw-r--r--sound/i2c/other/ak4114.c8
-rw-r--r--sound/pci/ac97/ac97_codec.c7
-rw-r--r--sound/pci/hda/hda_codec.c10
-rw-r--r--sound/pci/hda/hda_local.h1
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c24
-rw-r--r--sound/pcmcia/vx/vxpocket.c26
-rw-r--r--sound/ppc/tumbler.c8
504 files changed, 7562 insertions, 3928 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 15e4fed127f6..2e1898e4e8fd 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1416,6 +1416,11 @@ and is between 256 and 4096 characters. It is defined in the file
1416 1416
1417 scsi_logging= [SCSI] 1417 scsi_logging= [SCSI]
1418 1418
1419 scsi_mod.scan= [SCSI] sync (default) scans SCSI busses as they are
1420 discovered. async scans them in kernel threads,
1421 allowing boot to proceed. none ignores them, expecting
1422 user space to do the scan.
1423
1419 selinux [SELINUX] Disable or enable SELinux at boot time. 1424 selinux [SELINUX] Disable or enable SELinux at boot time.
1420 Format: { "0" | "1" } 1425 Format: { "0" | "1" }
1421 See security/selinux/Kconfig help text. 1426 See security/selinux/Kconfig help text.
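
Since scsi_mod is usually built in, the new option is passed on the kernel command line like any other module parameter. A hypothetical boot entry (illustrative only, not part of this patch) would look like:

    kernel /boot/vmlinuz root=/dev/sda1 ro scsi_mod.scan=async

With scan=async the buses are probed from kernel threads and boot continues in parallel; the scsi_wait_scan module listed in the diffstat above exists so an initramfs can wait for those scans to finish before looking for the root device.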
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 75a535a975c3..6f70f2b9327e 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -375,7 +375,6 @@ Summary:
375 scsi_add_device - creates new scsi device (lu) instance 375 scsi_add_device - creates new scsi device (lu) instance
376 scsi_add_host - perform sysfs registration and set up transport class 376 scsi_add_host - perform sysfs registration and set up transport class
377 scsi_adjust_queue_depth - change the queue depth on a SCSI device 377 scsi_adjust_queue_depth - change the queue depth on a SCSI device
378 scsi_assign_lock - replace default host_lock with given lock
379 scsi_bios_ptable - return copy of block device's partition table 378 scsi_bios_ptable - return copy of block device's partition table
380 scsi_block_requests - prevent further commands being queued to given host 379 scsi_block_requests - prevent further commands being queued to given host
381 scsi_deactivate_tcq - turn off tag command queueing 380 scsi_deactivate_tcq - turn off tag command queueing
@@ -489,20 +488,6 @@ void scsi_adjust_queue_depth(struct scsi_device * sdev, int tagged,
489 488
490 489
491/** 490/**
492 * scsi_assign_lock - replace default host_lock with given lock
493 * @shost: a pointer to a scsi host instance
494 * @lock: pointer to lock to replace host_lock for this host
495 *
496 * Returns nothing
497 *
498 * Might block: no
499 *
500 * Defined in: include/scsi/scsi_host.h .
501 **/
502void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
503
504
505/**
506 * scsi_bios_ptable - return copy of block device's partition table 491 * scsi_bios_ptable - return copy of block device's partition table
507 * @dev: pointer to block device 492 * @dev: pointer to block device
508 * 493 *
@@ -1366,17 +1351,11 @@ Locks
1366Each struct Scsi_Host instance has a spin_lock called struct 1351Each struct Scsi_Host instance has a spin_lock called struct
1367Scsi_Host::default_lock which is initialized in scsi_host_alloc() [found in 1352Scsi_Host::default_lock which is initialized in scsi_host_alloc() [found in
1368hosts.c]. Within the same function the struct Scsi_Host::host_lock pointer 1353hosts.c]. Within the same function the struct Scsi_Host::host_lock pointer
1369is initialized to point at default_lock with the scsi_assign_lock() function. 1354is initialized to point at default_lock. Thereafter lock and unlock
1370Thereafter lock and unlock operations performed by the mid level use the 1355operations performed by the mid level use the struct Scsi_Host::host_lock
1371struct Scsi_Host::host_lock pointer. 1356pointer. Previously drivers could override the host_lock pointer but
1372 1357this is not allowed anymore.
1373LLDs can override the use of struct Scsi_Host::default_lock by 1358
1374using scsi_assign_lock(). The earliest opportunity to do this would
1375be in the detect() function after it has invoked scsi_register(). It
1376could be replaced by a coarser grain lock (e.g. per driver) or a
1377lock of equal granularity (i.e. per host). Using finer grain locks
1378(e.g. per SCSI device) may be possible by juggling locks in
1379queuecommand().
1380 1359
1381Autosense 1360Autosense
1382========= 1361=========
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf96790..b3599743093b 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
60static int sharpsl_fatal_check(void); 60static int sharpsl_fatal_check(void);
61static int sharpsl_average_value(int ad); 61static int sharpsl_average_value(int ad);
62static void sharpsl_average_clear(void); 62static void sharpsl_average_clear(void);
63static void sharpsl_charge_toggle(void *private_); 63static void sharpsl_charge_toggle(struct work_struct *private_);
64static void sharpsl_battery_thread(void *private_); 64static void sharpsl_battery_thread(struct work_struct *private_);
65 65
66 66
67/* 67/*
68 * Variables 68 * Variables
69 */ 69 */
70struct sharpsl_pm_status sharpsl_pm; 70struct sharpsl_pm_status sharpsl_pm;
71DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL); 71DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
72DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL); 72DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
73DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger); 73DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
74 74
75 75
@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
116EXPORT_SYMBOL(sharpsl_battery_kick); 116EXPORT_SYMBOL(sharpsl_battery_kick);
117 117
118 118
119static void sharpsl_battery_thread(void *private_) 119static void sharpsl_battery_thread(struct work_struct *private_)
120{ 120{
121 int voltage, percent, apm_status, i = 0; 121 int voltage, percent, apm_status, i = 0;
122 122
@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
128 /* Corgi cannot confirm when battery fully charged so periodically kick! */ 128 /* Corgi cannot confirm when battery fully charged so periodically kick! */
129 if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON) 129 if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
130 && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL)) 130 && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
131 schedule_work(&toggle_charger); 131 schedule_delayed_work(&toggle_charger, 0);
132 132
133 while(1) { 133 while(1) {
134 voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT); 134 voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
212 sharpsl_pm_led(SHARPSL_LED_OFF); 212 sharpsl_pm_led(SHARPSL_LED_OFF);
213 sharpsl_pm.charge_mode = CHRG_OFF; 213 sharpsl_pm.charge_mode = CHRG_OFF;
214 214
215 schedule_work(&sharpsl_bat); 215 schedule_delayed_work(&sharpsl_bat, 0);
216} 216}
217 217
218static void sharpsl_charge_error(void) 218static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
222 sharpsl_pm.charge_mode = CHRG_ERROR; 222 sharpsl_pm.charge_mode = CHRG_ERROR;
223} 223}
224 224
225static void sharpsl_charge_toggle(void *private_) 225static void sharpsl_charge_toggle(struct work_struct *private_)
226{ 226{
227 dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies); 227 dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
228 228
@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
254 else if (sharpsl_pm.charge_mode == CHRG_ON) 254 else if (sharpsl_pm.charge_mode == CHRG_ON)
255 sharpsl_charge_off(); 255 sharpsl_charge_off();
256 256
257 schedule_work(&sharpsl_bat); 257 schedule_delayed_work(&sharpsl_bat, 0);
258} 258}
259 259
260 260
@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
279 sharpsl_charge_off(); 279 sharpsl_charge_off();
280 } else if (sharpsl_pm.full_count < 2) { 280 } else if (sharpsl_pm.full_count < 2) {
281 dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n"); 281 dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
282 schedule_work(&toggle_charger); 282 schedule_delayed_work(&toggle_charger, 0);
283 } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) { 283 } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
284 dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n"); 284 dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
285 schedule_work(&toggle_charger); 285 schedule_delayed_work(&toggle_charger, 0);
286 } else { 286 } else {
287 sharpsl_charge_off(); 287 sharpsl_charge_off();
288 sharpsl_pm.charge_mode = CHRG_DONE; 288 sharpsl_pm.charge_mode = CHRG_DONE;
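
The pattern repeated throughout this series: work handlers now receive the struct work_struct itself instead of an opaque data pointer, DECLARE_WORK()/INIT_WORK() lose their third argument, and any work item that is ever scheduled with a delay (even a delay of 0) must be declared as delayed work. A minimal sketch of the converted API, with illustrative names:

    static void my_poll(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_poll_work, my_poll);

    static void my_poll(struct work_struct *work)
    {
            /* no data pointer any more; state lives in globals or in the
             * structure that embeds the work item (see container_of later) */
            schedule_delayed_work(&my_poll_work, HZ);       /* re-arm in 1s */
    }

    static int __init my_init(void)
    {
            schedule_delayed_work(&my_poll_work, 0);        /* 0 == run ASAP */
            return 0;
    }

A plain DECLARE_WORK()/schedule_work() pair is still used for items that are never delayed, as in the akita-ioexp and leds-osk conversions below.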
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a083dee1..9d2346fb68f4 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)
323 323
324 cancel_delayed_work(&irda_config->gpio_expa); 324 cancel_delayed_work(&irda_config->gpio_expa);
325 PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode); 325 PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
326 schedule_work(&irda_config->gpio_expa); 326#error this is not permitted - mode is an argument variable
327 schedule_delayed_work(&irda_config->gpio_expa, 0);
327 328
328 return 0; 329 return 0;
329} 330}
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d209ff..cbe909bad79b 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
74 .rows = 8, 74 .rows = 8,
75 .cols = 8, 75 .cols = 8,
76 .keymap = nokia770_keymap, 76 .keymap = nokia770_keymap,
77 .keymapsize = ARRAY_SIZE(nokia770_keymap) 77 .keymapsize = ARRAY_SIZE(nokia770_keymap),
78 .delay = 4, 78 .delay = 4,
79}; 79};
80 80
@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
191 printk("HP connected\n"); 191 printk("HP connected\n");
192} 192}
193 193
194static void codec_delayed_power_down(void *arg) 194static void codec_delayed_power_down(struct work_struct *work)
195{ 195{
196 down(&audio_pwr_sem); 196 down(&audio_pwr_sem);
197 if (audio_pwr_state == -1) 197 if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
200 up(&audio_pwr_sem); 200 up(&audio_pwr_sem);
201} 201}
202 202
203static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL); 203static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
204 204
205static void nokia770_audio_pwr_down(void) 205static void nokia770_audio_pwr_down(void)
206{ 206{
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59b0e6f..0cbf1b0071f8 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@ static u8 hw_led_state;
35 35
36static u8 tps_leds_change; 36static u8 tps_leds_change;
37 37
38static void tps_work(void *unused) 38static void tps_work(struct work_struct *unused)
39{ 39{
40 for (;;) { 40 for (;;) {
41 u8 leds; 41 u8 leds;
@@ -61,7 +61,7 @@ static void tps_work(void *unused)
61 } 61 }
62} 62}
63 63
64static DECLARE_WORK(work, tps_work, NULL); 64static DECLARE_WORK(work, tps_work);
65 65
66#ifdef CONFIG_OMAP_OSK_MISTRAL 66#ifdef CONFIG_OMAP_OSK_MISTRAL
67 67
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a642ad7..3b1ad1d981a3 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)
206 206
207 cancel_delayed_work(&irda_config->gpio_expa); 207 cancel_delayed_work(&irda_config->gpio_expa);
208 PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode); 208 PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
209 schedule_work(&irda_config->gpio_expa); 209#error this is not permitted - mode is an argument variable
210 schedule_delayed_work(&irda_config->gpio_expa, 0);
210 211
211 return 0; 212 return 0;
212} 213}
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b398742ab56..12d2fe0ceff6 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;
36 36
37static int max7310_write(struct i2c_client *client, int address, int data); 37static int max7310_write(struct i2c_client *client, int address, int data);
38static struct i2c_client max7310_template; 38static struct i2c_client max7310_template;
39static void akita_ioexp_work(void *private_); 39static void akita_ioexp_work(struct work_struct *private_);
40 40
41static struct device *akita_ioexp_device; 41static struct device *akita_ioexp_device;
42static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT; 42static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
43DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL); 43DECLARE_WORK(akita_ioexp, akita_ioexp_work);
44 44
45 45
46/* 46/*
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
158EXPORT_SYMBOL(akita_set_ioexp); 158EXPORT_SYMBOL(akita_set_ioexp);
159EXPORT_SYMBOL(akita_reset_ioexp); 159EXPORT_SYMBOL(akita_reset_ioexp);
160 160
161static void akita_ioexp_work(void *private_) 161static void akita_ioexp_work(struct work_struct *private_)
162{ 162{
163 if (akita_ioexp_device) 163 if (akita_ioexp_device)
164 max7310_set_ouputs(akita_ioexp_device, ioexp_output_value); 164 max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153ae5b03..6b5d3518a1c0 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
51 } 51 }
52} 52}
53 53
54static void mce_work_fn(void *data); 54static void mce_work_fn(struct work_struct *work);
55static DECLARE_WORK(mce_work, mce_work_fn, NULL); 55static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
56 56
57static void mce_work_fn(void *data) 57static void mce_work_fn(struct work_struct *work)
58{ 58{
59 on_each_cpu(mce_checkregs, NULL, 1, 1); 59 on_each_cpu(mce_checkregs, NULL, 1, 1);
60 schedule_delayed_work(&mce_work, MCE_RATE); 60 schedule_delayed_work(&mce_work, MCE_RATE);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77cd65b..02a9b66b6ac3 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1049,13 +1049,15 @@ void cpu_exit_clear(void)
1049 1049
1050struct warm_boot_cpu_info { 1050struct warm_boot_cpu_info {
1051 struct completion *complete; 1051 struct completion *complete;
1052 struct work_struct task;
1052 int apicid; 1053 int apicid;
1053 int cpu; 1054 int cpu;
1054}; 1055};
1055 1056
1056static void __cpuinit do_warm_boot_cpu(void *p) 1057static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
1057{ 1058{
1058 struct warm_boot_cpu_info *info = p; 1059 struct warm_boot_cpu_info *info =
1060 container_of(work, struct warm_boot_cpu_info, task);
1059 do_boot_cpu(info->apicid, info->cpu); 1061 do_boot_cpu(info->apicid, info->cpu);
1060 complete(info->complete); 1062 complete(info->complete);
1061} 1063}
@@ -1064,7 +1066,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
1064{ 1066{
1065 DECLARE_COMPLETION_ONSTACK(done); 1067 DECLARE_COMPLETION_ONSTACK(done);
1066 struct warm_boot_cpu_info info; 1068 struct warm_boot_cpu_info info;
1067 struct work_struct task;
1068 int apicid, ret; 1069 int apicid, ret;
1069 struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); 1070 struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
1070 1071
@@ -1089,7 +1090,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
1089 info.complete = &done; 1090 info.complete = &done;
1090 info.apicid = apicid; 1091 info.apicid = apicid;
1091 info.cpu = cpu; 1092 info.cpu = cpu;
1092 INIT_WORK(&task, do_warm_boot_cpu, &info); 1093 INIT_WORK(&info.task, do_warm_boot_cpu);
1093 1094
1094 tsc_sync_disabled = 1; 1095 tsc_sync_disabled = 1;
1095 1096
@@ -1097,7 +1098,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
1097 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, 1098 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
1098 KERNEL_PGD_PTRS); 1099 KERNEL_PGD_PTRS);
1099 flush_tlb_all(); 1100 flush_tlb_all();
1100 schedule_work(&task); 1101 schedule_work(&info.task);
1101 wait_for_completion(&done); 1102 wait_for_completion(&done);
1102 1103
1103 tsc_sync_disabled = 0; 1104 tsc_sync_disabled = 0;
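
When per-invocation data is needed, the work item is embedded in the structure carrying that data and the handler recovers it with container_of(), which is exactly what the smpboot conversion above does. A reduced sketch of that shape (names illustrative):

    struct boot_info {
            struct work_struct task;
            struct completion *done;
            int cpu;
    };

    static void do_boot(struct work_struct *work)
    {
            struct boot_info *info = container_of(work, struct boot_info, task);

            /* ... bring up info->cpu ... */
            complete(info->done);
    }

    static int boot_one_cpu(int cpu)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct boot_info info = {
                    .done = &done,
                    .cpu  = cpu,
            };

            INIT_WORK(&info.task, do_boot);
            schedule_work(&info.task);
            wait_for_completion(&done);     /* info must outlive the work */
            return 0;
    }

The completion is what makes an on-stack work item safe here; without it the item could still be queued when the stack frame disappears, which is the situation flagged with #error in arch/um/drivers/net_kern.c further down.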
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc95828cd74..9810c8c90750 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0;
217static unsigned int cpufreq_init = 0; 217static unsigned int cpufreq_init = 0;
218static struct work_struct cpufreq_delayed_get_work; 218static struct work_struct cpufreq_delayed_get_work;
219 219
220static void handle_cpufreq_delayed_get(void *v) 220static void handle_cpufreq_delayed_get(struct work_struct *work)
221{ 221{
222 unsigned int cpu; 222 unsigned int cpu;
223 223
@@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void)
306{ 306{
307 int ret; 307 int ret;
308 308
309 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); 309 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
310 ret = cpufreq_register_notifier(&time_cpufreq_notifier_block, 310 ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
311 CPUFREQ_TRANSITION_NOTIFIER); 311 CPUFREQ_TRANSITION_NOTIFIER);
312 if (!ret) 312 if (!ret)
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986af70c..b62f0c4d2c7c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
209} 209}
210#endif 210#endif
211 211
212static void do_softint(void *private_) 212static void do_softint(struct work_struct *private_)
213{ 213{
214 printk(KERN_ERR "simserial: do_softint called\n"); 214 printk(KERN_ERR "simserial: do_softint called\n");
215} 215}
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
698 info->flags = sstate->flags; 698 info->flags = sstate->flags;
699 info->xmit_fifo_size = sstate->xmit_fifo_size; 699 info->xmit_fifo_size = sstate->xmit_fifo_size;
700 info->line = line; 700 info->line = line;
701 INIT_WORK(&info->work, do_softint, info); 701 INIT_WORK(&info->work, do_softint);
702 info->state = sstate; 702 info->state = sstate;
703 if (sstate->info) { 703 if (sstate->info) {
704 kfree(info); 704 kfree(info);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb3..6bedd97570ca 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
678 * disable the cmc interrupt vector. 678 * disable the cmc interrupt vector.
679 */ 679 */
680static void 680static void
681ia64_mca_cmc_vector_disable_keventd(void *unused) 681ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
682{ 682{
683 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); 683 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
684} 684}
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
690 * enable the cmc interrupt vector. 690 * enable the cmc interrupt vector.
691 */ 691 */
692static void 692static void
693ia64_mca_cmc_vector_enable_keventd(void *unused) 693ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
694{ 694{
695 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); 695 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
696} 696}
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1247 monarch_cpu = -1; 1247 monarch_cpu = -1;
1248} 1248}
1249 1249
1250static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); 1250static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
1251static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL); 1251static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
1252 1252
1253/* 1253/*
1254 * ia64_mca_cmc_int_handler 1254 * ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f5668144..b21ddecea943 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
463} 463}
464 464
465struct create_idle { 465struct create_idle {
466 struct work_struct work;
466 struct task_struct *idle; 467 struct task_struct *idle;
467 struct completion done; 468 struct completion done;
468 int cpu; 469 int cpu;
469}; 470};
470 471
471void 472void
472do_fork_idle(void *_c_idle) 473do_fork_idle(struct work_struct *work)
473{ 474{
474 struct create_idle *c_idle = _c_idle; 475 struct create_idle *c_idle =
476 container_of(work, struct create_idle, work);
475 477
476 c_idle->idle = fork_idle(c_idle->cpu); 478 c_idle->idle = fork_idle(c_idle->cpu);
477 complete(&c_idle->done); 479 complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
482{ 484{
483 int timeout; 485 int timeout;
484 struct create_idle c_idle = { 486 struct create_idle c_idle = {
487 .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
485 .cpu = cpu, 488 .cpu = cpu,
486 .done = COMPLETION_INITIALIZER(c_idle.done), 489 .done = COMPLETION_INITIALIZER(c_idle.done),
487 }; 490 };
488 DECLARE_WORK(work, do_fork_idle, &c_idle);
489 491
490 c_idle.idle = get_idle_for_cpu(cpu); 492 c_idle.idle = get_idle_for_cpu(cpu);
491 if (c_idle.idle) { 493 if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
497 * We can't use kernel_thread since we must avoid to reschedule the child. 499 * We can't use kernel_thread since we must avoid to reschedule the child.
498 */ 500 */
499 if (!keventd_up() || current_is_keventd()) 501 if (!keventd_up() || current_is_keventd())
500 work.func(work.data); 502 c_idle.work.func(&c_idle.work);
501 else { 503 else {
502 schedule_work(&work); 504 schedule_work(&c_idle.work);
503 wait_for_completion(&c_idle.done); 505 wait_for_completion(&c_idle.done);
504 } 506 }
505 507
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index 24781f009337..e5668af19789 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -3,7 +3,7 @@
3/* 3/*
4 * timers.c -- generic ColdFire hardware timer support. 4 * timers.c -- generic ColdFire hardware timer support.
5 * 5 *
6 * Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com) 6 * Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
7 */ 7 */
8 8
9/***************************************************************************/ 9/***************************************************************************/
@@ -44,6 +44,14 @@ unsigned int mcf_timerlevel = 5;
44extern void mcf_settimericr(int timer, int level); 44extern void mcf_settimericr(int timer, int level);
45extern int mcf_timerirqpending(int timer); 45extern int mcf_timerirqpending(int timer);
46 46
47#if defined(CONFIG_M532x)
48#define __raw_readtrr __raw_readl
49#define __raw_writetrr __raw_writel
50#else
51#define __raw_readtrr __raw_readw
52#define __raw_writetrr __raw_writew
53#endif
54
47/***************************************************************************/ 55/***************************************************************************/
48 56
49void coldfire_tick(void) 57void coldfire_tick(void)
@@ -57,7 +65,7 @@ void coldfire_tick(void)
57void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *)) 65void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
58{ 66{
59 __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR)); 67 __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
60 __raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR)); 68 __raw_writetrr(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
61 __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | 69 __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
62 MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR)); 70 MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
63 71
@@ -76,7 +84,7 @@ unsigned long coldfire_timer_offset(void)
76 unsigned long trr, tcn, offset; 84 unsigned long trr, tcn, offset;
77 85
78 tcn = __raw_readw(TA(MCFTIMER_TCN)); 86 tcn = __raw_readw(TA(MCFTIMER_TCN));
79 trr = __raw_readw(TA(MCFTIMER_TRR)); 87 trr = __raw_readtrr(TA(MCFTIMER_TRR));
80 offset = (tcn * (1000000 / HZ)) / trr; 88 offset = (tcn * (1000000 / HZ)) / trr;
81 89
82 /* Check if we just wrapped the counters and maybe missed a tick */ 90 /* Check if we just wrapped the counters and maybe missed a tick */
@@ -120,7 +128,7 @@ void coldfire_profile_init(void)
120 /* Set up TIMER 2 as high speed profile clock */ 128 /* Set up TIMER 2 as high speed profile clock */
121 __raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR)); 129 __raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
122 130
123 __raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR)); 131 __raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
124 __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | 132 __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
125 MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR)); 133 MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
126 134
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index c5482e3622eb..1b36f6261764 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -114,7 +114,7 @@ void BSP_gettod (int *yearp, int *monp, int *dayp,
114{ 114{
115} 115}
116 116
117int BSP_hwclk(int op, struct hwclk_time *t) 117int BSP_hwclk(int op, struct rtc_time *t)
118{ 118{
119 if (!op) { 119 if (!op) {
120 /* read */ 120 /* read */
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144c7881..2c82412b9efe 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -319,7 +319,7 @@ static void sp_cleanup(void)
319static int channel_open = 0; 319static int channel_open = 0;
320 320
321/* the work handler */ 321/* the work handler */
322static void sp_work(void *data) 322static void sp_work(struct work_struct *unused)
323{ 323{
324 if (!channel_open) { 324 if (!channel_open) {
325 if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) { 325 if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
354 return; 354 return;
355 } 355 }
356 356
357 INIT_WORK(&work, sp_work, NULL); 357 INIT_WORK(&work, sp_work);
358 queue_work(workqueue, &work); 358 queue_work(workqueue, &work);
359 } else 359 } else
360 queue_work(workqueue, &work); 360 queue_work(workqueue, &work);
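
kspd queues its work on its own workqueue rather than the shared keventd queue. Under the converted API only the handler signature changes; creating and using a private queue still looks roughly like this (a sketch with illustrative names):

    static struct workqueue_struct *my_wq;
    static struct work_struct my_work;

    static void my_work_fn(struct work_struct *unused)
    {
            /* runs in process context on my_wq's own kernel thread */
    }

    static int __init my_setup(void)
    {
            my_wq = create_singlethread_workqueue("my_wq");
            if (!my_wq)
                    return -ENOMEM;
            INIT_WORK(&my_work, my_work_fn);
            queue_work(my_wq, &my_work);
            return 0;
    }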
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
index 31bcdae84823..0e837762cc5b 100644
--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -14,7 +14,7 @@ static unsigned long avr_clock;
14 14
15static struct work_struct wd_work; 15static struct work_struct wd_work;
16 16
17static void wd_stop(void *unused) 17static void wd_stop(struct work_struct *unused)
18{ 18{
19 const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK"; 19 const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
20 int i = 0, rescue = 8; 20 int i = 0, rescue = 8;
@@ -122,7 +122,7 @@ static int __init ls_uarts_init(void)
122 122
123 ls_uart_init(); 123 ls_uart_init();
124 124
125 INIT_WORK(&wd_work, wd_stop, NULL); 125 INIT_WORK(&wd_work, wd_stop);
126 schedule_work(&wd_work); 126 schedule_work(&wd_work);
127 127
128 return 0; 128 return 0;
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a8544a..c3a89414ddc0 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@
18 18
19#define OLD_BACKLIGHT_MAX 15 19#define OLD_BACKLIGHT_MAX 15
20 20
21static void pmac_backlight_key_worker(void *data); 21static void pmac_backlight_key_worker(struct work_struct *work);
22static void pmac_backlight_set_legacy_worker(void *data); 22static void pmac_backlight_set_legacy_worker(struct work_struct *work);
23 23
24static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL); 24static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
25static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL); 25static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
26 26
27/* Although these variables are used in interrupt context, it makes no sense to 27/* Although these variables are used in interrupt context, it makes no sense to
28 * protect them. No user is able to produce enough key events per second and 28 * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
94 return level; 94 return level;
95} 95}
96 96
97static void pmac_backlight_key_worker(void *data) 97static void pmac_backlight_key_worker(struct work_struct *work)
98{ 98{
99 if (atomic_read(&kernel_backlight_disabled)) 99 if (atomic_read(&kernel_backlight_disabled))
100 return; 100 return;
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
166 return error; 166 return error;
167} 167}
168 168
169static void pmac_backlight_set_legacy_worker(void *data) 169static void pmac_backlight_set_legacy_worker(struct work_struct *work)
170{ 170{
171 if (atomic_read(&kernel_backlight_disabled)) 171 if (atomic_read(&kernel_backlight_disabled))
172 return; 172 return;
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 137077451316..49037edf7d39 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
37/* EEH event workqueue setup. */ 37/* EEH event workqueue setup. */
38static DEFINE_SPINLOCK(eeh_eventlist_lock); 38static DEFINE_SPINLOCK(eeh_eventlist_lock);
39LIST_HEAD(eeh_eventlist); 39LIST_HEAD(eeh_eventlist);
40static void eeh_thread_launcher(void *); 40static void eeh_thread_launcher(struct work_struct *);
41DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); 41DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
42 42
43/* Serialize reset sequences for a given pci device */ 43/* Serialize reset sequences for a given pci device */
44DEFINE_MUTEX(eeh_event_mutex); 44DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
103 * eeh_thread_launcher 103 * eeh_thread_launcher
104 * @dummy - unused 104 * @dummy - unused
105 */ 105 */
106static void eeh_thread_launcher(void *dummy) 106static void eeh_thread_launcher(struct work_struct *dummy)
107{ 107{
108 if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) 108 if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
109 printk(KERN_ERR "Failed to start EEH daemon\n"); 109 printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2e1943e27819..709952c25f29 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -385,6 +385,7 @@ struct fcc_enet_private {
385 phy_info_t *phy; 385 phy_info_t *phy;
386 struct work_struct phy_relink; 386 struct work_struct phy_relink;
387 struct work_struct phy_display_config; 387 struct work_struct phy_display_config;
388 struct net_device *dev;
388 389
389 uint sequence_done; 390 uint sequence_done;
390 391
@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = {
1391 NULL 1392 NULL
1392}; 1393};
1393 1394
1394static void mii_display_status(void *data) 1395static void mii_display_status(struct work_struct *work)
1395{ 1396{
1396 struct net_device *dev = data; 1397 volatile struct fcc_enet_private *fep =
1397 volatile struct fcc_enet_private *fep = dev->priv; 1398 container_of(work, struct fcc_enet_private, phy_relink);
1399 struct net_device *dev = fep->dev;
1398 uint s = fep->phy_status; 1400 uint s = fep->phy_status;
1399 1401
1400 if (!fep->link && !fep->old_link) { 1402 if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data)
1428 printk(".\n"); 1430 printk(".\n");
1429} 1431}
1430 1432
1431static void mii_display_config(void *data) 1433static void mii_display_config(struct work_struct *work)
1432{ 1434{
1433 struct net_device *dev = data; 1435 volatile struct fcc_enet_private *fep =
1434 volatile struct fcc_enet_private *fep = dev->priv; 1436 container_of(work, struct fcc_enet_private,
1437 phy_display_config);
1438 struct net_device *dev = fep->dev;
1435 uint s = fep->phy_status; 1439 uint s = fep->phy_status;
1436 1440
1437 printk("%s: config: auto-negotiation ", dev->name); 1441 printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void)
1758 cep->phy_id_done = 0; 1762 cep->phy_id_done = 0;
1759 cep->phy_addr = fip->fc_phyaddr; 1763 cep->phy_addr = fip->fc_phyaddr;
1760 mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy); 1764 mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
1761 INIT_WORK(&cep->phy_relink, mii_display_status, dev); 1765 INIT_WORK(&cep->phy_relink, mii_display_status);
1762 INIT_WORK(&cep->phy_display_config, mii_display_config, dev); 1766 INIT_WORK(&cep->phy_display_config, mii_display_config);
1767 cep->dev = dev;
1763#endif /* CONFIG_USE_MDIO */ 1768#endif /* CONFIG_USE_MDIO */
1764 1769
1765 fip++; 1770 fip++;
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 2f9fa9e3d331..e6c28fb423b2 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -173,6 +173,7 @@ struct fec_enet_private {
173 uint phy_speed; 173 uint phy_speed;
174 phy_info_t *phy; 174 phy_info_t *phy;
175 struct work_struct phy_task; 175 struct work_struct phy_task;
176 struct net_device *dev;
176 177
177 uint sequence_done; 178 uint sequence_done;
178 179
@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev)
1263 printk(".\n"); 1264 printk(".\n");
1264} 1265}
1265 1266
1266static void mii_display_config(void *priv) 1267static void mii_display_config(struct work_struct *work)
1267{ 1268{
1268 struct net_device *dev = (struct net_device *)priv; 1269 struct fec_enet_private *fep =
1269 struct fec_enet_private *fep = dev->priv; 1270 container_of(work, struct fec_enet_private, phy_task);
1271 struct net_device *dev = fep->dev;
1270 volatile uint *s = &(fep->phy_status); 1272 volatile uint *s = &(fep->phy_status);
1271 1273
1272 printk("%s: config: auto-negotiation ", dev->name); 1274 printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv)
1295 fep->sequence_done = 1; 1297 fep->sequence_done = 1;
1296} 1298}
1297 1299
1298static void mii_relink(void *priv) 1300static void mii_relink(struct work_struct *work)
1299{ 1301{
1300 struct net_device *dev = (struct net_device *)priv; 1302 struct fec_enet_private *fep =
1301 struct fec_enet_private *fep = dev->priv; 1303 container_of(work, struct fec_enet_private, phy_task);
1304 struct net_device *dev = fep->dev;
1302 int duplex; 1305 int duplex;
1303 1306
1304 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; 1307 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
1325{ 1328{
1326 struct fec_enet_private *fep = dev->priv; 1329 struct fec_enet_private *fep = dev->priv;
1327 1330
1328 INIT_WORK(&fep->phy_task, mii_relink, (void *)dev); 1331 fep->dev = dev;
1332 INIT_WORK(&fep->phy_task, mii_relink);
1329 schedule_work(&fep->phy_task); 1333 schedule_work(&fep->phy_task);
1330} 1334}
1331 1335
@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev)
1333{ 1337{
1334 struct fec_enet_private *fep = dev->priv; 1338 struct fec_enet_private *fep = dev->priv;
1335 1339
1336 INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev); 1340 fep->dev = dev;
1341 INIT_WORK(&fep->phy_task, mii_display_config);
1337 schedule_work(&fep->phy_task); 1342 schedule_work(&fep->phy_task);
1338} 1343}
1339 1344
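
Handlers that used to be handed a struct net_device * directly now have to find it again from the private structure, so both Ethernet drivers above grow a dev back-pointer that is filled in before the work is queued. The shape of that conversion, reduced to a sketch (names illustrative):

    struct my_priv {
            struct work_struct phy_task;
            struct net_device *dev;         /* back-pointer for the handler */
    };

    static void my_phy_task(struct work_struct *work)
    {
            struct my_priv *priv = container_of(work, struct my_priv, phy_task);
            struct net_device *dev = priv->dev;

            printk(KERN_INFO "%s: PHY event\n", dev->name);
    }

    static void my_queue_phy_task(struct net_device *dev)
    {
            struct my_priv *priv = dev->priv;

            priv->dev = dev;                /* so the handler can find dev again */
            INIT_WORK(&priv->phy_task, my_phy_task);
            schedule_work(&priv->phy_task);
    }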
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index af1e8fc7d985..67d5cf9cba83 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -92,8 +92,8 @@ static int appldata_timer_active;
92 * Work queue 92 * Work queue
93 */ 93 */
94static struct workqueue_struct *appldata_wq; 94static struct workqueue_struct *appldata_wq;
95static void appldata_work_fn(void *data); 95static void appldata_work_fn(struct work_struct *work);
96static DECLARE_WORK(appldata_work, appldata_work_fn, NULL); 96static DECLARE_WORK(appldata_work, appldata_work_fn);
97 97
98 98
99/* 99/*
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data)
125 * 125 *
126 * call data gathering function for each (active) module 126 * call data gathering function for each (active) module
127 */ 127 */
128static void appldata_work_fn(void *data) 128static void appldata_work_fn(struct work_struct *work)
129{ 129{
130 struct list_head *lh; 130 struct list_head *lh;
131 struct appldata_ops *ops; 131 struct appldata_ops *ops;
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 3576b3cc505e..7d4190e55654 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans)
638 return -1; 638 return -1;
639} 639}
640 640
641void chan_interrupt(struct list_head *chans, struct work_struct *task, 641void chan_interrupt(struct list_head *chans, struct delayed_work *task,
642 struct tty_struct *tty, int irq) 642 struct tty_struct *tty, int irq)
643{ 643{
644 struct list_head *ele, *next; 644 struct list_head *ele, *next;
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 7b172160fe04..96f0189327af 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = {
56 56
57static LIST_HEAD(mc_requests); 57static LIST_HEAD(mc_requests);
58 58
59static void mc_work_proc(void *unused) 59static void mc_work_proc(struct work_struct *unused)
60{ 60{
61 struct mconsole_entry *req; 61 struct mconsole_entry *req;
62 unsigned long flags; 62 unsigned long flags;
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused)
72 } 72 }
73} 73}
74 74
75static DECLARE_WORK(mconsole_work, mc_work_proc, NULL); 75static DECLARE_WORK(mconsole_work, mc_work_proc);
76 76
77static irqreturn_t mconsole_interrupt(int irq, void *dev_id) 77static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
78{ 78{
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index ec9eb8bd9432..286bc0b3207f 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
99 * same device, since it tests for (dev->flags & IFF_UP). So 99 * same device, since it tests for (dev->flags & IFF_UP). So
100 * there's no harm in delaying the device shutdown. */ 100 * there's no harm in delaying the device shutdown. */
101 schedule_work(&close_work); 101 schedule_work(&close_work);
102#error this is not permitted - close_work will go out of scope
102 goto out; 103 goto out;
103 } 104 }
104 reactivate_fd(lp->fd, UM_ETH_IRQ); 105 reactivate_fd(lp->fd, UM_ETH_IRQ);
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index ce9f3733f73e..6dfe632f1c14 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port)
132DECLARE_MUTEX(ports_sem); 132DECLARE_MUTEX(ports_sem);
133struct list_head ports = LIST_HEAD_INIT(ports); 133struct list_head ports = LIST_HEAD_INIT(ports);
134 134
135void port_work_proc(void *unused) 135void port_work_proc(struct work_struct *unused)
136{ 136{
137 struct port_list *port; 137 struct port_list *port;
138 struct list_head *ele; 138 struct list_head *ele;
@@ -150,7 +150,7 @@ void port_work_proc(void *unused)
150 local_irq_restore(flags); 150 local_irq_restore(flags);
151} 151}
152 152
153DECLARE_WORK(port_work, port_work_proc, NULL); 153DECLARE_WORK(port_work, port_work_proc);
154 154
155static irqreturn_t port_interrupt(int irq, void *data) 155static irqreturn_t port_interrupt(int irq, void *data)
156{ 156{
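
The s390 appldata and UML hunks show the degenerate case: a statically declared work item whose handler never used its data pointer. With the two-argument DECLARE_WORK() the trailing NULL simply disappears. A sketch under the same assumed API, with made-up names:

#include <linux/workqueue.h>
#include <linux/kernel.h>

/* handler now takes the work item itself; this one never needed the old data */
static void example_work_fn(struct work_struct *unused)
{
        pr_debug("deferred work ran\n");
}

/* two-argument form: no more trailing NULL data pointer */
static DECLARE_WORK(example_work, example_work_fn);

static void example_trigger(void)
{
        schedule_work(&example_work);   /* e.g. from an interrupt handler */
}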
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea88801d88..c7587fc39015 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
306 */ 306 */
307 307
308static int check_interval = 5 * 60; /* 5 minutes */ 308static int check_interval = 5 * 60; /* 5 minutes */
309static void mcheck_timer(void *data); 309static void mcheck_timer(struct work_struct *work);
310static DECLARE_WORK(mcheck_work, mcheck_timer, NULL); 310static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
311 311
312static void mcheck_check_cpu(void *info) 312static void mcheck_check_cpu(void *info)
313{ 313{
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
315 do_machine_check(NULL, 0); 315 do_machine_check(NULL, 0);
316} 316}
317 317
318static void mcheck_timer(void *data) 318static void mcheck_timer(struct work_struct *work)
319{ 319{
320 on_each_cpu(mcheck_check_cpu, NULL, 1, 1); 320 on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
321 schedule_delayed_work(&mcheck_work, check_interval * HZ); 321 schedule_delayed_work(&mcheck_work, check_interval * HZ);
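
mce.c is the periodic-poll variant: the work item becomes a delayed_work declared with DECLARE_DELAYED_WORK() and re-arms itself from its own handler. A hedged sketch of that shape, with an illustrative name and interval:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/init.h>

#define EXAMPLE_INTERVAL (5 * 60 * HZ)  /* illustrative poll period */

static void example_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll_work, example_poll);

static void example_poll(struct work_struct *work)
{
        /* ... periodic check would go here ... */
        schedule_delayed_work(&example_poll_work, EXAMPLE_INTERVAL);
}

static int __init example_init(void)
{
        schedule_delayed_work(&example_poll_work, EXAMPLE_INTERVAL);
        return 0;
}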
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e747af58..9800147c4c68 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
753} 753}
754 754
755struct create_idle { 755struct create_idle {
756 struct work_struct work;
756 struct task_struct *idle; 757 struct task_struct *idle;
757 struct completion done; 758 struct completion done;
758 int cpu; 759 int cpu;
759}; 760};
760 761
761void do_fork_idle(void *_c_idle) 762void do_fork_idle(struct work_struct *work)
762{ 763{
763 struct create_idle *c_idle = _c_idle; 764 struct create_idle *c_idle =
765 container_of(work, struct create_idle, work);
764 766
765 c_idle->idle = fork_idle(c_idle->cpu); 767 c_idle->idle = fork_idle(c_idle->cpu);
766 complete(&c_idle->done); 768 complete(&c_idle->done);
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
775 int timeout; 777 int timeout;
776 unsigned long start_rip; 778 unsigned long start_rip;
777 struct create_idle c_idle = { 779 struct create_idle c_idle = {
780 .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
778 .cpu = cpu, 781 .cpu = cpu,
779 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 782 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
780 }; 783 };
781 DECLARE_WORK(work, do_fork_idle, &c_idle);
782 784
783 /* allocate memory for gdts of secondary cpus. Hotplug is considered */ 785 /* allocate memory for gdts of secondary cpus. Hotplug is considered */
784 if (!cpu_gdt_descr[cpu].address && 786 if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
825 * thread. 827 * thread.
826 */ 828 */
827 if (!keventd_up() || current_is_keventd()) 829 if (!keventd_up() || current_is_keventd())
828 work.func(work.data); 830 c_idle.work.func(&c_idle.work);
829 else { 831 else {
830 schedule_work(&work); 832 schedule_work(&c_idle.work);
831 wait_for_completion(&c_idle.done); 833 wait_for_completion(&c_idle.done);
832 } 834 }
833 835
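
The smpboot change shows the on-stack pattern: the work_struct moves into the caller-owned structure, is wired up with __WORK_INITIALIZER() pointing at itself, and the completion keeps the stack frame alive until the handler finishes. A rough equivalent under the same assumed API, with invented names:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct example_req {
        struct work_struct work;
        struct completion done;
        int arg;
        int result;
};

static void example_handler(struct work_struct *work)
{
        struct example_req *req =
                container_of(work, struct example_req, work);

        req->result = req->arg * 2;     /* stand-in for the real computation */
        complete(&req->done);
}

static int example_run(int arg)
{
        struct example_req req = {
                .work = __WORK_INITIALIZER(req.work, example_handler),
                .done = COMPLETION_INITIALIZER_ONSTACK(req.done),
                .arg  = arg,
        };

        schedule_work(&req.work);
        wait_for_completion(&req.done); /* keeps the stack frame valid */
        return req.result;
}

Waiting on the completion before returning is what makes an on-stack work_struct safe; compare the #error added to the UML net driver above, where no such guarantee exists.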
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544d2cfb..9f05bc9b2dad 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
563static unsigned int cpufreq_init = 0; 563static unsigned int cpufreq_init = 0;
564static struct work_struct cpufreq_delayed_get_work; 564static struct work_struct cpufreq_delayed_get_work;
565 565
566static void handle_cpufreq_delayed_get(void *v) 566static void handle_cpufreq_delayed_get(struct work_struct *v)
567{ 567{
568 unsigned int cpu; 568 unsigned int cpu;
569 for_each_online_cpu(cpu) { 569 for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
639 639
640static int __init cpufreq_tsc(void) 640static int __init cpufreq_tsc(void)
641{ 641{
642 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); 642 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
643 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, 643 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
644 CPUFREQ_TRANSITION_NOTIFIER)) 644 CPUFREQ_TRANSITION_NOTIFIER))
645 cpufreq_init = 1; 645 cpufreq_init = 1;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 00242111a457..5934c4bfd52a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
1274 * 1274 *
1275 * FIXME! dispatch queue is not a queue at all! 1275 * FIXME! dispatch queue is not a queue at all!
1276 */ 1276 */
1277static void as_work_handler(void *data) 1277static void as_work_handler(struct work_struct *work)
1278{ 1278{
1279 struct request_queue *q = data; 1279 struct as_data *ad = container_of(work, struct as_data, antic_work);
1280 struct request_queue *q = ad->q;
1280 unsigned long flags; 1281 unsigned long flags;
1281 1282
1282 spin_lock_irqsave(q->queue_lock, flags); 1283 spin_lock_irqsave(q->queue_lock, flags);
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q)
1332 ad->antic_timer.function = as_antic_timeout; 1333 ad->antic_timer.function = as_antic_timeout;
1333 ad->antic_timer.data = (unsigned long)q; 1334 ad->antic_timer.data = (unsigned long)q;
1334 init_timer(&ad->antic_timer); 1335 init_timer(&ad->antic_timer);
1335 INIT_WORK(&ad->antic_work, as_work_handler, q); 1336 INIT_WORK(&ad->antic_work, as_work_handler);
1336 1337
1337 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); 1338 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
1338 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); 1339 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e9019ed39b73..84e9be073180 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1840,9 +1840,11 @@ queue_fail:
1840 return 1; 1840 return 1;
1841} 1841}
1842 1842
1843static void cfq_kick_queue(void *data) 1843static void cfq_kick_queue(struct work_struct *work)
1844{ 1844{
1845 request_queue_t *q = data; 1845 struct cfq_data *cfqd =
1846 container_of(work, struct cfq_data, unplug_work);
1847 request_queue_t *q = cfqd->queue;
1846 unsigned long flags; 1848 unsigned long flags;
1847 1849
1848 spin_lock_irqsave(q->queue_lock, flags); 1850 spin_lock_irqsave(q->queue_lock, flags);
@@ -1986,7 +1988,7 @@ static void *cfq_init_queue(request_queue_t *q)
1986 cfqd->idle_class_timer.function = cfq_idle_class_timer; 1988 cfqd->idle_class_timer.function = cfq_idle_class_timer;
1987 cfqd->idle_class_timer.data = (unsigned long) cfqd; 1989 cfqd->idle_class_timer.data = (unsigned long) cfqd;
1988 1990
1989 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); 1991 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
1990 1992
1991 cfqd->cfq_quantum = cfq_quantum; 1993 cfqd->cfq_quantum = cfq_quantum;
1992 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; 1994 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0f82e12f7b67..cc6e95f8e5d9 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -34,7 +34,7 @@
34 */ 34 */
35#include <scsi/scsi_cmnd.h> 35#include <scsi/scsi_cmnd.h>
36 36
37static void blk_unplug_work(void *data); 37static void blk_unplug_work(struct work_struct *work);
38static void blk_unplug_timeout(unsigned long data); 38static void blk_unplug_timeout(unsigned long data);
39static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); 39static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
40static void init_request_from_bio(struct request *req, struct bio *bio); 40static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
227 if (q->unplug_delay == 0) 227 if (q->unplug_delay == 0)
228 q->unplug_delay = 1; 228 q->unplug_delay = 1;
229 229
230 INIT_WORK(&q->unplug_work, blk_unplug_work, q); 230 INIT_WORK(&q->unplug_work, blk_unplug_work);
231 231
232 q->unplug_timer.function = blk_unplug_timeout; 232 q->unplug_timer.function = blk_unplug_timeout;
233 q->unplug_timer.data = (unsigned long)q; 233 q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1631 } 1631 }
1632} 1632}
1633 1633
1634static void blk_unplug_work(void *data) 1634static void blk_unplug_work(struct work_struct *work)
1635{ 1635{
1636 request_queue_t *q = data; 1636 request_queue_t *q = container_of(work, request_queue_t, unplug_work);
1637 1637
1638 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, 1638 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
1639 q->rq.count[READ] + q->rq.count[WRITE]); 1639 q->rq.count[READ] + q->rq.count[WRITE]);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 5493c2fbbab1..b3e210723a71 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -277,7 +277,7 @@ static int sg_io(struct file *file, request_queue_t *q,
277 if (rq->bio) 277 if (rq->bio)
278 blk_queue_bounce(q, &rq->bio); 278 blk_queue_bounce(q, &rq->bio);
279 279
280 rq->timeout = (hdr->timeout * HZ) / 1000; 280 rq->timeout = jiffies_to_msecs(hdr->timeout);
281 if (!rq->timeout) 281 if (!rq->timeout)
282 rq->timeout = q->sg_timeout; 282 rq->timeout = q->sg_timeout;
283 if (!rq->timeout) 283 if (!rq->timeout)
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c
index 9b5b15601068..2ebffb84f1d9 100644
--- a/crypto/cryptomgr.c
+++ b/crypto/cryptomgr.c
@@ -40,9 +40,10 @@ struct cryptomgr_param {
40 char template[CRYPTO_MAX_ALG_NAME]; 40 char template[CRYPTO_MAX_ALG_NAME];
41}; 41};
42 42
43static void cryptomgr_probe(void *data) 43static void cryptomgr_probe(struct work_struct *work)
44{ 44{
45 struct cryptomgr_param *param = data; 45 struct cryptomgr_param *param =
46 container_of(work, struct cryptomgr_param, work);
46 struct crypto_template *tmpl; 47 struct crypto_template *tmpl;
47 struct crypto_instance *inst; 48 struct crypto_instance *inst;
48 int err; 49 int err;
@@ -112,7 +113,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
112 param->larval.type = larval->alg.cra_flags; 113 param->larval.type = larval->alg.cra_flags;
113 param->larval.mask = larval->mask; 114 param->larval.mask = larval->mask;
114 115
115 INIT_WORK(&param->work, cryptomgr_probe, param); 116 INIT_WORK(&param->work, cryptomgr_probe);
116 schedule_work(&param->work); 117 schedule_work(&param->work);
117 118
118 return NOTIFY_STOP; 119 return NOTIFY_STOP;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 068fe4f100b0..02b30ae6a68e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -50,6 +50,7 @@ ACPI_MODULE_NAME("osl")
50struct acpi_os_dpc { 50struct acpi_os_dpc {
51 acpi_osd_exec_callback function; 51 acpi_osd_exec_callback function;
52 void *context; 52 void *context;
53 struct work_struct work;
53}; 54};
54 55
55#ifdef CONFIG_ACPI_CUSTOM_DSDT 56#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
564 acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); 565 acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
565} 566}
566 567
567static void acpi_os_execute_deferred(void *context) 568static void acpi_os_execute_deferred(struct work_struct *work)
568{ 569{
569 struct acpi_os_dpc *dpc = NULL; 570 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
570
571
572 dpc = (struct acpi_os_dpc *)context;
573 if (!dpc) { 571 if (!dpc) {
574 printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); 572 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
575 return; 573 return;
@@ -602,7 +600,6 @@ acpi_status acpi_os_execute(acpi_execute_type type,
602{ 600{
603 acpi_status status = AE_OK; 601 acpi_status status = AE_OK;
604 struct acpi_os_dpc *dpc; 602 struct acpi_os_dpc *dpc;
605 struct work_struct *task;
606 603
607 ACPI_FUNCTION_TRACE("os_queue_for_execution"); 604 ACPI_FUNCTION_TRACE("os_queue_for_execution");
608 605
@@ -615,28 +612,22 @@ acpi_status acpi_os_execute(acpi_execute_type type,
615 612
616 /* 613 /*
617 * Allocate/initialize DPC structure. Note that this memory will be 614 * Allocate/initialize DPC structure. Note that this memory will be
618 * freed by the callee. The kernel handles the tq_struct list in a 615 * freed by the callee. The kernel handles the work_struct list in a
619 * way that allows us to also free its memory inside the callee. 616 * way that allows us to also free its memory inside the callee.
620 * Because we may want to schedule several tasks with different 617 * Because we may want to schedule several tasks with different
621 * parameters we can't use the approach some kernel code uses of 618 * parameters we can't use the approach some kernel code uses of
622 * having a static tq_struct. 619 * having a static work_struct.
623 * We can save time and code by allocating the DPC and tq_structs
624 * from the same memory.
625 */ 620 */
626 621
627 dpc = 622 dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
628 kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
629 GFP_ATOMIC);
630 if (!dpc) 623 if (!dpc)
631 return_ACPI_STATUS(AE_NO_MEMORY); 624 return_ACPI_STATUS(AE_NO_MEMORY);
632 625
633 dpc->function = function; 626 dpc->function = function;
634 dpc->context = context; 627 dpc->context = context;
635 628
636 task = (void *)(dpc + 1); 629 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
637 INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc); 630 if (!queue_work(kacpid_wq, &dpc->work)) {
638
639 if (!queue_work(kacpid_wq, task)) {
640 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 631 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
641 "Call to queue_work() failed.\n")); 632 "Call to queue_work() failed.\n"));
642 kfree(dpc); 633 kfree(dpc);
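
The ACPI OSL change replaces the trick of kmalloc()ing a work_struct in the same allocation as the context with an embedded work member that the handler both recovers and frees. A sketch under the same assumptions; names are illustrative, not ACPI's:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_dpc {
        void (*function)(void *context);
        void *context;
        struct work_struct work;        /* embedded rather than tacked on */
};

static void example_deferred(struct work_struct *work)
{
        struct example_dpc *dpc =
                container_of(work, struct example_dpc, work);

        dpc->function(dpc->context);
        kfree(dpc);                     /* the handler owns and frees the context */
}

static int example_defer(void (*fn)(void *), void *ctx)
{
        struct example_dpc *dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);

        if (!dpc)
                return -ENOMEM;
        dpc->function = fn;
        dpc->context = ctx;
        INIT_WORK(&dpc->work, example_deferred);
        if (!schedule_work(&dpc->work)) {
                kfree(dpc);
                return -EBUSY;
        }
        return 0;
}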
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f8ec3896b793..8816e30fb7a4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1081,7 +1081,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
1081 * ata_port_queue_task - Queue port_task 1081 * ata_port_queue_task - Queue port_task
1082 * @ap: The ata_port to queue port_task for 1082 * @ap: The ata_port to queue port_task for
1083 * @fn: workqueue function to be scheduled 1083 * @fn: workqueue function to be scheduled
1084 * @data: data value to pass to workqueue function 1084 * @data: data for @fn to use
1085 * @delay: delay time for workqueue function 1085 * @delay: delay time for workqueue function
1086 * 1086 *
1087 * Schedule @fn(@data) for execution after @delay jiffies using 1087 * Schedule @fn(@data) for execution after @delay jiffies using
@@ -1096,7 +1096,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
1096 * LOCKING: 1096 * LOCKING:
1097 * Inherited from caller. 1097 * Inherited from caller.
1098 */ 1098 */
1099void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, 1099void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1100 unsigned long delay) 1100 unsigned long delay)
1101{ 1101{
1102 int rc; 1102 int rc;
@@ -1104,12 +1104,10 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
1104 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) 1104 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
1105 return; 1105 return;
1106 1106
1107 PREPARE_WORK(&ap->port_task, fn, data); 1107 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1108 ap->port_task_data = data;
1108 1109
1109 if (!delay) 1110 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
1110 rc = queue_work(ata_wq, &ap->port_task);
1111 else
1112 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
1113 1111
1114 /* rc == 0 means that another user is using port task */ 1112 /* rc == 0 means that another user is using port task */
1115 WARN_ON(rc == 0); 1113 WARN_ON(rc == 0);
@@ -4588,10 +4586,11 @@ fsm_start:
4588 return poll_next; 4586 return poll_next;
4589} 4587}
4590 4588
4591static void ata_pio_task(void *_data) 4589static void ata_pio_task(struct work_struct *work)
4592{ 4590{
4593 struct ata_queued_cmd *qc = _data; 4591 struct ata_port *ap =
4594 struct ata_port *ap = qc->ap; 4592 container_of(work, struct ata_port, port_task.work);
4593 struct ata_queued_cmd *qc = ap->port_task_data;
4595 u8 status; 4594 u8 status;
4596 int poll_next; 4595 int poll_next;
4597 4596
@@ -5635,9 +5634,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
5635 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5634 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5636#endif 5635#endif
5637 5636
5638 INIT_WORK(&ap->port_task, NULL, NULL); 5637 INIT_DELAYED_WORK(&ap->port_task, NULL);
5639 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap); 5638 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5640 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap); 5639 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5641 INIT_LIST_HEAD(&ap->eh_done_q); 5640 INIT_LIST_HEAD(&ap->eh_done_q);
5642 init_waitqueue_head(&ap->eh_wait_q); 5641 init_waitqueue_head(&ap->eh_wait_q);
5643 5642
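
libata shows the delayed_work flavour with an extra wrinkle: the same port_task is re-targeted at different functions, so the data the old API carried now travels through a field of the owning structure (port_task_data), and the handler steps back through container_of() via the embedded .work member. A hedged sketch of that arrangement, with invented names and schedule_delayed_work() standing in for libata's private workqueue:

#include <linux/workqueue.h>

struct example_port {
        struct delayed_work task;       /* was a plain work_struct + data */
        void *task_data;                /* side channel for the old data argument */
};

static void example_pio_task(struct work_struct *work)
{
        /* the handler receives the embedded work member of the delayed_work */
        struct example_port *port =
                container_of(work, struct example_port, task.work);
        void *data = port->task_data;

        /* ... act on data ... */
        (void)data;
}

static void example_port_init(struct example_port *port)
{
        INIT_DELAYED_WORK(&port->task, NULL);   /* function chosen at queue time */
}

static void example_queue_task(struct example_port *port, void *data,
                               unsigned long delay)
{
        port->task_data = data;
        PREPARE_DELAYED_WORK(&port->task, example_pio_task);
        schedule_delayed_work(&port->task, delay);
}

A delay of zero queues the work immediately, which is why the separate queue_work()/queue_delayed_work() branch in ata_port_queue_task() could be dropped.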
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 76a85dfb7307..08ad44b3e48f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host)
332 if (ap->pflags & ATA_PFLAG_LOADING) 332 if (ap->pflags & ATA_PFLAG_LOADING)
333 ap->pflags &= ~ATA_PFLAG_LOADING; 333 ap->pflags &= ~ATA_PFLAG_LOADING;
334 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 334 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
335 queue_work(ata_aux_wq, &ap->hotplug_task); 335 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
336 336
337 if (ap->pflags & ATA_PFLAG_RECOVERED) 337 if (ap->pflags & ATA_PFLAG_RECOVERED)
338 ata_port_printk(ap, KERN_INFO, "EH complete\n"); 338 ata_port_printk(ap, KERN_INFO, "EH complete\n");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 8eaace94d963..664e1377b54c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2963,7 +2963,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
2963 2963
2964/** 2964/**
2965 * ata_scsi_hotplug - SCSI part of hotplug 2965 * ata_scsi_hotplug - SCSI part of hotplug
2966 * @data: Pointer to ATA port to perform SCSI hotplug on 2966 * @work: Pointer to ATA port to perform SCSI hotplug on
2967 * 2967 *
2968 * Perform SCSI part of hotplug. It's executed from a separate 2968 * Perform SCSI part of hotplug. It's executed from a separate
2969 * workqueue after EH completes. This is necessary because SCSI 2969 * workqueue after EH completes. This is necessary because SCSI
@@ -2973,9 +2973,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
2973 * LOCKING: 2973 * LOCKING:
2974 * Kernel thread context (may sleep). 2974 * Kernel thread context (may sleep).
2975 */ 2975 */
2976void ata_scsi_hotplug(void *data) 2976void ata_scsi_hotplug(struct work_struct *work)
2977{ 2977{
2978 struct ata_port *ap = data; 2978 struct ata_port *ap =
2979 container_of(work, struct ata_port, hotplug_task.work);
2979 int i; 2980 int i;
2980 2981
2981 if (ap->pflags & ATA_PFLAG_UNLOADING) { 2982 if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3076,7 +3077,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3076 3077
3077/** 3078/**
3078 * ata_scsi_dev_rescan - initiate scsi_rescan_device() 3079 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
3079 * @data: Pointer to ATA port to perform scsi_rescan_device() 3080 * @work: Pointer to ATA port to perform scsi_rescan_device()
3080 * 3081 *
3081 * After ATA pass thru (SAT) commands are executed successfully, 3082 * After ATA pass thru (SAT) commands are executed successfully,
3082 * libata need to propagate the changes to SCSI layer. This 3083 * libata need to propagate the changes to SCSI layer. This
@@ -3086,9 +3087,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3086 * LOCKING: 3087 * LOCKING:
3087 * Kernel thread context (may sleep). 3088 * Kernel thread context (may sleep).
3088 */ 3089 */
3089void ata_scsi_dev_rescan(void *data) 3090void ata_scsi_dev_rescan(struct work_struct *work)
3090{ 3091{
3091 struct ata_port *ap = data; 3092 struct ata_port *ap =
3093 container_of(work, struct ata_port, scsi_rescan_task);
3092 unsigned long flags; 3094 unsigned long flags;
3093 unsigned int i; 3095 unsigned int i;
3094 3096
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 107b2b565229..81ae41d5f23f 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -94,7 +94,7 @@ extern struct scsi_transport_template ata_scsi_transport_template;
94 94
95extern void ata_scsi_scan_host(struct ata_port *ap); 95extern void ata_scsi_scan_host(struct ata_port *ap);
96extern int ata_scsi_offline_dev(struct ata_device *dev); 96extern int ata_scsi_offline_dev(struct ata_device *dev);
97extern void ata_scsi_hotplug(void *data); 97extern void ata_scsi_hotplug(struct work_struct *work);
98extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 98extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
99 unsigned int buflen); 99 unsigned int buflen);
100 100
@@ -124,7 +124,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
124 unsigned int (*actor) (struct ata_scsi_args *args, 124 unsigned int (*actor) (struct ata_scsi_args *args,
125 u8 *rbuf, unsigned int buflen)); 125 u8 *rbuf, unsigned int buflen));
126extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); 126extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
127extern void ata_scsi_dev_rescan(void *data); 127extern void ata_scsi_dev_rescan(struct work_struct *work);
128extern int ata_bus_probe(struct ata_port *ap); 128extern int ata_bus_probe(struct ata_port *ap);
129 129
130/* libata-eh.c */ 130/* libata-eh.c */
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 4ca6fa5dcb42..9ed7f58424a3 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -154,19 +154,12 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
154 tuple.TupleOffset = 0; 154 tuple.TupleOffset = 0;
155 tuple.TupleDataMax = 255; 155 tuple.TupleDataMax = 255;
156 tuple.Attributes = 0; 156 tuple.Attributes = 0;
157 tuple.DesiredTuple = CISTPL_CONFIG;
158
159 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
160 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(pdev, &tuple));
161 CS_CHECK(ParseTuple, pcmcia_parse_tuple(pdev, &tuple, &stk->parse));
162 pdev->conf.ConfigBase = stk->parse.config.base;
163 pdev->conf.Present = stk->parse.config.rmask[0];
164 157
165 /* See if we have a manufacturer identifier. Use it to set is_kme for 158 /* See if we have a manufacturer identifier. Use it to set is_kme for
166 vendor quirks */ 159 vendor quirks */
167 tuple.DesiredTuple = CISTPL_MANFID; 160 is_kme = ((pdev->manf_id == MANFID_KME) &&
168 if (!pcmcia_get_first_tuple(pdev, &tuple) && !pcmcia_get_tuple_data(pdev, &tuple) && !pcmcia_parse_tuple(pdev, &tuple, &stk->parse)) 161 ((pdev->card_id == PRODID_KME_KXLC005_A) ||
169 is_kme = ((stk->parse.manfid.manf == MANFID_KME) && ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) || (stk->parse.manfid.card == PRODID_KME_KXLC005_B))); 162 (pdev->card_id == PRODID_KME_KXLC005_B)));
170 163
171 /* Not sure if this is right... look up the current Vcc */ 164 /* Not sure if this is right... look up the current Vcc */
172 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf)); 165 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf));
@@ -356,8 +349,10 @@ static struct pcmcia_device_id pcmcia_devices[] = {
356 PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), 349 PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
357 PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), 350 PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
358 PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443), 351 PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
352 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
359 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), 353 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
360 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), 354 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
355 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
361 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 356 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
362 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), 357 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
363 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), 358 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 87b17c33b3f9..f40786121948 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -135,7 +135,7 @@ static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos,
135 int flags); 135 int flags);
136static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, 136static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
137 char *page); 137 char *page);
138static void idt77252_softint(void *dev_id); 138static void idt77252_softint(struct work_struct *work);
139 139
140 140
141static struct atmdev_ops idt77252_ops = 141static struct atmdev_ops idt77252_ops =
@@ -2866,9 +2866,10 @@ out:
2866} 2866}
2867 2867
2868static void 2868static void
2869idt77252_softint(void *dev_id) 2869idt77252_softint(struct work_struct *work)
2870{ 2870{
2871 struct idt77252_dev *card = dev_id; 2871 struct idt77252_dev *card =
2872 container_of(work, struct idt77252_dev, tqueue);
2872 u32 stat; 2873 u32 stat;
2873 int done; 2874 int done;
2874 2875
@@ -3697,7 +3698,7 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
3697 card->pcidev = pcidev; 3698 card->pcidev = pcidev;
3698 sprintf(card->name, "idt77252-%d", card->index); 3699 sprintf(card->name, "idt77252-%d", card->index);
3699 3700
3700 INIT_WORK(&card->tqueue, idt77252_softint, (void *)card); 3701 INIT_WORK(&card->tqueue, idt77252_softint);
3701 3702
3702 membase = pci_resource_start(pcidev, 1); 3703 membase = pci_resource_start(pcidev, 1);
3703 srambase = pci_resource_start(pcidev, 2); 3704 srambase = pci_resource_start(pcidev, 2);
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 6d111228cfac..2308e83e5f33 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -159,7 +159,7 @@ void aoecmd_work(struct aoedev *d);
159void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); 159void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
160void aoecmd_ata_rsp(struct sk_buff *); 160void aoecmd_ata_rsp(struct sk_buff *);
161void aoecmd_cfg_rsp(struct sk_buff *); 161void aoecmd_cfg_rsp(struct sk_buff *);
162void aoecmd_sleepwork(void *vp); 162void aoecmd_sleepwork(struct work_struct *);
163struct sk_buff *new_skb(ulong); 163struct sk_buff *new_skb(ulong);
164 164
165int aoedev_init(void); 165int aoedev_init(void);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8a13b1af8bab..97f7f535f412 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -408,9 +408,9 @@ rexmit_timer(ulong vp)
408/* this function performs work that has been deferred until sleeping is OK 408/* this function performs work that has been deferred until sleeping is OK
409 */ 409 */
410void 410void
411aoecmd_sleepwork(void *vp) 411aoecmd_sleepwork(struct work_struct *work)
412{ 412{
413 struct aoedev *d = (struct aoedev *) vp; 413 struct aoedev *d = container_of(work, struct aoedev, work);
414 414
415 if (d->flags & DEVFL_GDALLOC) 415 if (d->flags & DEVFL_GDALLOC)
416 aoeblk_gdalloc(d); 416 aoeblk_gdalloc(d);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 6125921bbec4..05a97197c918 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -88,7 +88,7 @@ aoedev_newdev(ulong nframes)
88 kfree(d); 88 kfree(d);
89 return NULL; 89 return NULL;
90 } 90 }
91 INIT_WORK(&d->work, aoecmd_sleepwork, d); 91 INIT_WORK(&d->work, aoecmd_sleepwork);
92 spin_lock_init(&d->lock); 92 spin_lock_init(&d->lock);
93 init_timer(&d->timer); 93 init_timer(&d->timer);
94 d->timer.data = (ulong) d; 94 d->timer.data = (ulong) d;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e6d3a87cbe3..3f1b38276e96 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -992,11 +992,11 @@ static void empty(void)
992{ 992{
993} 993}
994 994
995static DECLARE_WORK(floppy_work, NULL, NULL); 995static DECLARE_WORK(floppy_work, NULL);
996 996
997static void schedule_bh(void (*handler) (void)) 997static void schedule_bh(void (*handler) (void))
998{ 998{
999 PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL); 999 PREPARE_WORK(&floppy_work, (work_func_t)handler);
1000 schedule_work(&floppy_work); 1000 schedule_work(&floppy_work);
1001} 1001}
1002 1002
@@ -1008,7 +1008,7 @@ static void cancel_activity(void)
1008 1008
1009 spin_lock_irqsave(&floppy_lock, flags); 1009 spin_lock_irqsave(&floppy_lock, flags);
1010 do_floppy = NULL; 1010 do_floppy = NULL;
1011 PREPARE_WORK(&floppy_work, (void *)empty, NULL); 1011 PREPARE_WORK(&floppy_work, (work_func_t)empty);
1012 del_timer(&fd_timer); 1012 del_timer(&fd_timer);
1013 spin_unlock_irqrestore(&floppy_lock, flags); 1013 spin_unlock_irqrestore(&floppy_lock, flags);
1014} 1014}
@@ -1868,7 +1868,7 @@ static void show_floppy(void)
1868 printk("fdc_busy=%lu\n", fdc_busy); 1868 printk("fdc_busy=%lu\n", fdc_busy);
1869 if (do_floppy) 1869 if (do_floppy)
1870 printk("do_floppy=%p\n", do_floppy); 1870 printk("do_floppy=%p\n", do_floppy);
1871 if (floppy_work.pending) 1871 if (work_pending(&floppy_work))
1872 printk("floppy_work.func=%p\n", floppy_work.func); 1872 printk("floppy_work.func=%p\n", floppy_work.func);
1873 if (timer_pending(&fd_timer)) 1873 if (timer_pending(&fd_timer))
1874 printk("fd_timer.function=%p\n", fd_timer.function); 1874 printk("fd_timer.function=%p\n", fd_timer.function);
@@ -4498,7 +4498,7 @@ static void floppy_release_irq_and_dma(void)
4498 printk("floppy timer still active:%s\n", timeout_message); 4498 printk("floppy timer still active:%s\n", timeout_message);
4499 if (timer_pending(&fd_timer)) 4499 if (timer_pending(&fd_timer))
4500 printk("auxiliary floppy timer still active\n"); 4500 printk("auxiliary floppy timer still active\n");
4501 if (floppy_work.pending) 4501 if (work_pending(&floppy_work))
4502 printk("work still pending\n"); 4502 printk("work still pending\n");
4503#endif 4503#endif
4504 old_fdc = fdc; 4504 old_fdc = fdc;
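
floppy.c keeps one static work item and re-points it at different bottom halves with PREPARE_WORK(), testing work_pending() instead of reading the pending field directly. The sketch below shows the same re-targeting with properly typed work_func_t handlers rather than floppy's cast of void (*)(void) functions; names are made up:

#include <linux/workqueue.h>
#include <linux/kernel.h>

static void example_read_bh(struct work_struct *unused)
{
        /* ... read-side bottom half ... */
}

static void example_write_bh(struct work_struct *unused)
{
        /* ... write-side bottom half ... */
}

/* one static work item, re-targeted before each use */
static DECLARE_WORK(example_bh, NULL);

static void example_schedule_bh(work_func_t handler)
{
        PREPARE_WORK(&example_bh, handler);
        schedule_work(&example_bh);
}

static void example_show_state(void)
{
        if (work_pending(&example_bh))
                printk(KERN_DEBUG "bottom half pending: %p\n", example_bh.func);
}

Callers pick the handler at queue time, e.g. example_schedule_bh(example_read_bh).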
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 40a11e567970..9d9bff23f426 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -352,19 +352,19 @@ static enum action (*phase)(void);
352 352
353static void run_fsm(void); 353static void run_fsm(void);
354 354
355static void ps_tq_int( void *data); 355static void ps_tq_int(struct work_struct *work);
356 356
357static DECLARE_WORK(fsm_tq, ps_tq_int, NULL); 357static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
358 358
359static void schedule_fsm(void) 359static void schedule_fsm(void)
360{ 360{
361 if (!nice) 361 if (!nice)
362 schedule_work(&fsm_tq); 362 schedule_delayed_work(&fsm_tq, 0);
363 else 363 else
364 schedule_delayed_work(&fsm_tq, nice-1); 364 schedule_delayed_work(&fsm_tq, nice-1);
365} 365}
366 366
367static void ps_tq_int(void *data) 367static void ps_tq_int(struct work_struct *work)
368{ 368{
369 run_fsm(); 369 run_fsm();
370} 370}
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
index 932342d7a8eb..bc3703294143 100644
--- a/drivers/block/paride/pseudo.h
+++ b/drivers/block/paride/pseudo.h
@@ -35,7 +35,7 @@
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37 37
38static void ps_tq_int( void *data); 38static void ps_tq_int(struct work_struct *work);
39 39
40static void (* ps_continuation)(void); 40static void (* ps_continuation)(void);
41static int (* ps_ready)(void); 41static int (* ps_ready)(void);
@@ -45,7 +45,7 @@ static int ps_nice = 0;
45 45
46static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); 46static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
47 47
48static DECLARE_WORK(ps_tq, ps_tq_int, NULL); 48static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
49 49
50static void ps_set_intr(void (*continuation)(void), 50static void ps_set_intr(void (*continuation)(void),
51 int (*ready)(void), 51 int (*ready)(void),
@@ -63,14 +63,14 @@ static void ps_set_intr(void (*continuation)(void),
63 if (!ps_tq_active) { 63 if (!ps_tq_active) {
64 ps_tq_active = 1; 64 ps_tq_active = 1;
65 if (!ps_nice) 65 if (!ps_nice)
66 schedule_work(&ps_tq); 66 schedule_delayed_work(&ps_tq, 0);
67 else 67 else
68 schedule_delayed_work(&ps_tq, ps_nice-1); 68 schedule_delayed_work(&ps_tq, ps_nice-1);
69 } 69 }
70 spin_unlock_irqrestore(&ps_spinlock,flags); 70 spin_unlock_irqrestore(&ps_spinlock,flags);
71} 71}
72 72
73static void ps_tq_int(void *data) 73static void ps_tq_int(struct work_struct *work)
74{ 74{
75 void (*con)(void); 75 void (*con)(void);
76 unsigned long flags; 76 unsigned long flags;
@@ -92,7 +92,7 @@ static void ps_tq_int(void *data)
92 } 92 }
93 ps_tq_active = 1; 93 ps_tq_active = 1;
94 if (!ps_nice) 94 if (!ps_nice)
95 schedule_work(&ps_tq); 95 schedule_delayed_work(&ps_tq, 0);
96 else 96 else
97 schedule_delayed_work(&ps_tq, ps_nice-1); 97 schedule_delayed_work(&ps_tq, ps_nice-1);
98 spin_unlock_irqrestore(&ps_spinlock,flags); 98 spin_unlock_irqrestore(&ps_spinlock,flags);
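
The paride changes turn ps_tq into a delayed_work so that the immediate and nice-delayed cases collapse into one schedule_delayed_work() call with a delay of zero. A minimal equivalent, under the same assumed API and with invented names:

#include <linux/workqueue.h>

static void example_fsm(struct work_struct *work)
{
        /* ... state machine step ... */
}

static DECLARE_DELAYED_WORK(example_fsm_work, example_fsm);

static void example_kick_fsm(int nice)
{
        /* a delay of 0 queues immediately, so one call covers both cases */
        schedule_delayed_work(&example_fsm_work, nice ? nice - 1 : 0);
}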
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 47d6975268ff..54509eb3391b 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1244,9 +1244,10 @@ out:
1244 return IRQ_RETVAL(handled); 1244 return IRQ_RETVAL(handled);
1245} 1245}
1246 1246
1247static void carm_fsm_task (void *_data) 1247static void carm_fsm_task (struct work_struct *work)
1248{ 1248{
1249 struct carm_host *host = _data; 1249 struct carm_host *host =
1250 container_of(work, struct carm_host, fsm_task);
1250 unsigned long flags; 1251 unsigned long flags;
1251 unsigned int state; 1252 unsigned int state;
1252 int rc, i, next_dev; 1253 int rc, i, next_dev;
@@ -1619,7 +1620,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1619 host->pdev = pdev; 1620 host->pdev = pdev;
1620 host->flags = pci_dac ? FL_DAC : 0; 1621 host->flags = pci_dac ? FL_DAC : 0;
1621 spin_lock_init(&host->lock); 1622 spin_lock_init(&host->lock);
1622 INIT_WORK(&host->fsm_task, carm_fsm_task, host); 1623 INIT_WORK(&host->fsm_task, carm_fsm_task);
1623 init_completion(&host->probe_comp); 1624 init_completion(&host->probe_comp);
1624 1625
1625 for (i = 0; i < ARRAY_SIZE(host->req); i++) 1626 for (i = 0; i < ARRAY_SIZE(host->req); i++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 0d5c73f07265..2098eff91e14 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -376,7 +376,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
376 int stalled_pipe); 376 int stalled_pipe);
377static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); 377static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
378static void ub_reset_enter(struct ub_dev *sc, int try); 378static void ub_reset_enter(struct ub_dev *sc, int try);
379static void ub_reset_task(void *arg); 379static void ub_reset_task(struct work_struct *work);
380static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); 380static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
381static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, 381static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
382 struct ub_capacity *ret); 382 struct ub_capacity *ret);
@@ -1558,9 +1558,9 @@ static void ub_reset_enter(struct ub_dev *sc, int try)
1558 schedule_work(&sc->reset_work); 1558 schedule_work(&sc->reset_work);
1559} 1559}
1560 1560
1561static void ub_reset_task(void *arg) 1561static void ub_reset_task(struct work_struct *work)
1562{ 1562{
1563 struct ub_dev *sc = arg; 1563 struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
1564 unsigned long flags; 1564 unsigned long flags;
1565 struct list_head *p; 1565 struct list_head *p;
1566 struct ub_lun *lun; 1566 struct ub_lun *lun;
@@ -2179,7 +2179,7 @@ static int ub_probe(struct usb_interface *intf,
2179 usb_init_urb(&sc->work_urb); 2179 usb_init_urb(&sc->work_urb);
2180 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); 2180 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2181 atomic_set(&sc->poison, 0); 2181 atomic_set(&sc->poison, 0);
2182 INIT_WORK(&sc->reset_work, ub_reset_task, sc); 2182 INIT_WORK(&sc->reset_work, ub_reset_task);
2183 init_waitqueue_head(&sc->reset_wait); 2183 init_waitqueue_head(&sc->reset_wait);
2184 2184
2185 init_timer(&sc->work_timer); 2185 init_timer(&sc->work_timer);
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 516751754aa9..9256985cbe36 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -157,9 +157,10 @@ static void bcm203x_complete(struct urb *urb)
157 } 157 }
158} 158}
159 159
160static void bcm203x_work(void *user_data) 160static void bcm203x_work(struct work_struct *work)
161{ 161{
162 struct bcm203x_data *data = user_data; 162 struct bcm203x_data *data =
163 container_of(work, struct bcm203x_data, work);
163 164
164 if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) 165 if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
165 BT_ERR("Can't submit URB"); 166 BT_ERR("Can't submit URB");
@@ -246,7 +247,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
246 247
247 release_firmware(firmware); 248 release_firmware(firmware);
248 249
249 INIT_WORK(&data->work, bcm203x_work, (void *) data); 250 INIT_WORK(&data->work, bcm203x_work);
250 251
251 usb_set_intfdata(intf, data); 252 usb_set_intfdata(intf, data);
252 253
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index cbc07250b898..acfb6a430dcc 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -892,43 +892,10 @@ static void bluecard_detach(struct pcmcia_device *link)
892} 892}
893 893
894 894
895static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
896{
897 int i;
898
899 i = pcmcia_get_first_tuple(handle, tuple);
900 if (i != CS_SUCCESS)
901 return CS_NO_MORE_ITEMS;
902
903 i = pcmcia_get_tuple_data(handle, tuple);
904 if (i != CS_SUCCESS)
905 return i;
906
907 return pcmcia_parse_tuple(handle, tuple, parse);
908}
909
910static int bluecard_config(struct pcmcia_device *link) 895static int bluecard_config(struct pcmcia_device *link)
911{ 896{
912 bluecard_info_t *info = link->priv; 897 bluecard_info_t *info = link->priv;
913 tuple_t tuple; 898 int i, n;
914 u_short buf[256];
915 cisparse_t parse;
916 int i, n, last_ret, last_fn;
917
918 tuple.TupleData = (cisdata_t *)buf;
919 tuple.TupleOffset = 0;
920 tuple.TupleDataMax = 255;
921 tuple.Attributes = 0;
922
923 /* Get configuration register information */
924 tuple.DesiredTuple = CISTPL_CONFIG;
925 last_ret = first_tuple(link, &tuple, &parse);
926 if (last_ret != CS_SUCCESS) {
927 last_fn = ParseTuple;
928 goto cs_failed;
929 }
930 link->conf.ConfigBase = parse.config.base;
931 link->conf.Present = parse.config.rmask[0];
932 899
933 link->conf.ConfigIndex = 0x20; 900 link->conf.ConfigIndex = 0x20;
934 link->io.NumPorts1 = 64; 901 link->io.NumPorts1 = 64;
@@ -966,9 +933,6 @@ static int bluecard_config(struct pcmcia_device *link)
966 933
967 return 0; 934 return 0;
968 935
969cs_failed:
970 cs_error(link, last_fn, last_ret);
971
972failed: 936failed:
973 bluecard_release(link); 937 bluecard_release(link);
974 return -ENODEV; 938 return -ENODEV;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 3a96a0babc6a..aae3abace586 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -713,22 +713,7 @@ static int bt3c_config(struct pcmcia_device *link)
713 u_short buf[256]; 713 u_short buf[256];
714 cisparse_t parse; 714 cisparse_t parse;
715 cistpl_cftable_entry_t *cf = &parse.cftable_entry; 715 cistpl_cftable_entry_t *cf = &parse.cftable_entry;
716 int i, j, try, last_ret, last_fn; 716 int i, j, try;
717
718 tuple.TupleData = (cisdata_t *)buf;
719 tuple.TupleOffset = 0;
720 tuple.TupleDataMax = 255;
721 tuple.Attributes = 0;
722
723 /* Get configuration register information */
724 tuple.DesiredTuple = CISTPL_CONFIG;
725 last_ret = first_tuple(link, &tuple, &parse);
726 if (last_ret != CS_SUCCESS) {
727 last_fn = ParseTuple;
728 goto cs_failed;
729 }
730 link->conf.ConfigBase = parse.config.base;
731 link->conf.Present = parse.config.rmask[0];
732 717
733 /* First pass: look for a config entry that looks normal. */ 718 /* First pass: look for a config entry that looks normal. */
734 tuple.TupleData = (cisdata_t *)buf; 719 tuple.TupleData = (cisdata_t *)buf;
@@ -802,9 +787,6 @@ found_port:
802 787
803 return 0; 788 return 0;
804 789
805cs_failed:
806 cs_error(link, last_fn, last_ret);
807
808failed: 790failed:
809 bt3c_release(link); 791 bt3c_release(link);
810 return -ENODEV; 792 return -ENODEV;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 3b29086b7c3f..92648ef2f5d0 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -644,22 +644,7 @@ static int btuart_config(struct pcmcia_device *link)
644 u_short buf[256]; 644 u_short buf[256];
645 cisparse_t parse; 645 cisparse_t parse;
646 cistpl_cftable_entry_t *cf = &parse.cftable_entry; 646 cistpl_cftable_entry_t *cf = &parse.cftable_entry;
647 int i, j, try, last_ret, last_fn; 647 int i, j, try;
648
649 tuple.TupleData = (cisdata_t *)buf;
650 tuple.TupleOffset = 0;
651 tuple.TupleDataMax = 255;
652 tuple.Attributes = 0;
653
654 /* Get configuration register information */
655 tuple.DesiredTuple = CISTPL_CONFIG;
656 last_ret = first_tuple(link, &tuple, &parse);
657 if (last_ret != CS_SUCCESS) {
658 last_fn = ParseTuple;
659 goto cs_failed;
660 }
661 link->conf.ConfigBase = parse.config.base;
662 link->conf.Present = parse.config.rmask[0];
663 648
664 /* First pass: look for a config entry that looks normal. */ 649 /* First pass: look for a config entry that looks normal. */
665 tuple.TupleData = (cisdata_t *) buf; 650 tuple.TupleData = (cisdata_t *) buf;
@@ -734,9 +719,6 @@ found_port:
734 719
735 return 0; 720 return 0;
736 721
737cs_failed:
738 cs_error(link, last_fn, last_ret);
739
740failed: 722failed:
741 btuart_release(link); 723 btuart_release(link);
742 return -ENODEV; 724 return -ENODEV;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 07eafbc5dc3a..77b99eecbc49 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -626,22 +626,7 @@ static int dtl1_config(struct pcmcia_device *link)
626 u_short buf[256]; 626 u_short buf[256];
627 cisparse_t parse; 627 cisparse_t parse;
628 cistpl_cftable_entry_t *cf = &parse.cftable_entry; 628 cistpl_cftable_entry_t *cf = &parse.cftable_entry;
629 int i, last_ret, last_fn; 629 int i;
630
631 tuple.TupleData = (cisdata_t *)buf;
632 tuple.TupleOffset = 0;
633 tuple.TupleDataMax = 255;
634 tuple.Attributes = 0;
635
636 /* Get configuration register information */
637 tuple.DesiredTuple = CISTPL_CONFIG;
638 last_ret = first_tuple(link, &tuple, &parse);
639 if (last_ret != CS_SUCCESS) {
640 last_fn = ParseTuple;
641 goto cs_failed;
642 }
643 link->conf.ConfigBase = parse.config.base;
644 link->conf.Present = parse.config.rmask[0];
645 630
646 tuple.TupleData = (cisdata_t *)buf; 631 tuple.TupleData = (cisdata_t *)buf;
647 tuple.TupleOffset = 0; 632 tuple.TupleOffset = 0;
@@ -690,9 +675,6 @@ static int dtl1_config(struct pcmcia_device *link)
690 675
691 return 0; 676 return 0;
692 677
693cs_failed:
694 cs_error(link, last_fn, last_ret);
695
696failed: 678failed:
697 dtl1_release(link); 679 dtl1_release(link);
698 return -ENODEV; 680 return -ENODEV;
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e608dadece2f..acb2de5e3a98 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -926,9 +926,10 @@ cy_sched_event(struct cyclades_port *info, int event)
926 * had to poll every port to see if that port needed servicing. 926 * had to poll every port to see if that port needed servicing.
927 */ 927 */
928static void 928static void
929do_softint(void *private_) 929do_softint(struct work_struct *work)
930{ 930{
931 struct cyclades_port *info = (struct cyclades_port *) private_; 931 struct cyclades_port *info =
932 container_of(work, struct cyclades_port, tqueue);
932 struct tty_struct *tty; 933 struct tty_struct *tty;
933 934
934 tty = info->tty; 935 tty = info->tty;
@@ -5328,7 +5329,7 @@ cy_init(void)
5328 info->blocked_open = 0; 5329 info->blocked_open = 0;
5329 info->default_threshold = 0; 5330 info->default_threshold = 0;
5330 info->default_timeout = 0; 5331 info->default_timeout = 0;
5331 INIT_WORK(&info->tqueue, do_softint, info); 5332 INIT_WORK(&info->tqueue, do_softint);
5332 init_waitqueue_head(&info->open_wait); 5333 init_waitqueue_head(&info->open_wait);
5333 init_waitqueue_head(&info->close_wait); 5334 init_waitqueue_head(&info->close_wait);
5334 init_waitqueue_head(&info->shutdown_wait); 5335 init_waitqueue_head(&info->shutdown_wait);
@@ -5403,7 +5404,7 @@ cy_init(void)
5403 info->blocked_open = 0; 5404 info->blocked_open = 0;
5404 info->default_threshold = 0; 5405 info->default_threshold = 0;
5405 info->default_timeout = 0; 5406 info->default_timeout = 0;
5406 INIT_WORK(&info->tqueue, do_softint, info); 5407 INIT_WORK(&info->tqueue, do_softint);
5407 init_waitqueue_head(&info->open_wait); 5408 init_waitqueue_head(&info->open_wait);
5408 init_waitqueue_head(&info->close_wait); 5409 init_waitqueue_head(&info->close_wait);
5409 init_waitqueue_head(&info->shutdown_wait); 5410 init_waitqueue_head(&info->shutdown_wait);
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 60c1695db300..806f9ce5f47b 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -500,9 +500,9 @@ via_dmablit_timer(unsigned long data)
500 500
501 501
502static void 502static void
503via_dmablit_workqueue(void *data) 503via_dmablit_workqueue(struct work_struct *work)
504{ 504{
505 drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; 505 drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
506 drm_device_t *dev = blitq->dev; 506 drm_device_t *dev = blitq->dev;
507 unsigned long irqsave; 507 unsigned long irqsave;
508 drm_via_sg_info_t *cur_sg; 508 drm_via_sg_info_t *cur_sg;
@@ -571,7 +571,7 @@ via_init_dmablit(drm_device_t *dev)
571 DRM_INIT_WAITQUEUE(blitq->blit_queue + j); 571 DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
572 } 572 }
573 DRM_INIT_WAITQUEUE(&blitq->busy_queue); 573 DRM_INIT_WAITQUEUE(&blitq->busy_queue);
574 INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); 574 INIT_WORK(&blitq->wq, via_dmablit_workqueue);
575 init_timer(&blitq->poll_timer); 575 init_timer(&blitq->poll_timer);
576 blitq->poll_timer.function = &via_dmablit_timer; 576 blitq->poll_timer.function = &via_dmablit_timer;
577 blitq->poll_timer.data = (unsigned long) blitq; 577 blitq->poll_timer.data = (unsigned long) blitq;
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 706733c0b36a..7c71eb779802 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -200,7 +200,7 @@ static int pc_ioctl(struct tty_struct *, struct file *,
200static int info_ioctl(struct tty_struct *, struct file *, 200static int info_ioctl(struct tty_struct *, struct file *,
201 unsigned int, unsigned long); 201 unsigned int, unsigned long);
202static void pc_set_termios(struct tty_struct *, struct termios *); 202static void pc_set_termios(struct tty_struct *, struct termios *);
203static void do_softint(void *); 203static void do_softint(struct work_struct *work);
204static void pc_stop(struct tty_struct *); 204static void pc_stop(struct tty_struct *);
205static void pc_start(struct tty_struct *); 205static void pc_start(struct tty_struct *);
206static void pc_throttle(struct tty_struct * tty); 206static void pc_throttle(struct tty_struct * tty);
@@ -1505,7 +1505,7 @@ static void post_fep_init(unsigned int crd)
1505 1505
1506 ch->brdchan = bc; 1506 ch->brdchan = bc;
1507 ch->mailbox = gd; 1507 ch->mailbox = gd;
1508 INIT_WORK(&ch->tqueue, do_softint, ch); 1508 INIT_WORK(&ch->tqueue, do_softint);
1509 ch->board = &boards[crd]; 1509 ch->board = &boards[crd];
1510 1510
1511 spin_lock_irqsave(&epca_lock, flags); 1511 spin_lock_irqsave(&epca_lock, flags);
@@ -2566,9 +2566,9 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
2566 2566
2567/* --------------------- Begin do_softint ----------------------- */ 2567/* --------------------- Begin do_softint ----------------------- */
2568 2568
2569static void do_softint(void *private_) 2569static void do_softint(struct work_struct *work)
2570{ /* Begin do_softint */ 2570{ /* Begin do_softint */
2571 struct channel *ch = (struct channel *) private_; 2571 struct channel *ch = container_of(work, struct channel, tqueue);
2572 /* Called in response to a modem change event */ 2572 /* Called in response to a modem change event */
2573 if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ 2573 if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */
2574 struct tty_struct *tty = ch->tty; 2574 struct tty_struct *tty = ch->tty;
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 15a4ea896328..93b551962513 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -723,9 +723,10 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
723 * ------------------------------------------------------------------- 723 * -------------------------------------------------------------------
724 */ 724 */
725 725
726static void do_softint(void *private_) 726static void do_softint(struct work_struct *work)
727{ 727{
728 struct esp_struct *info = (struct esp_struct *) private_; 728 struct esp_struct *info =
729 container_of(work, struct esp_struct, tqueue);
729 struct tty_struct *tty; 730 struct tty_struct *tty;
730 731
731 tty = info->tty; 732 tty = info->tty;
@@ -746,9 +747,10 @@ static void do_softint(void *private_)
746 * do_serial_hangup() -> tty->hangup() -> esp_hangup() 747 * do_serial_hangup() -> tty->hangup() -> esp_hangup()
747 * 748 *
748 */ 749 */
749static void do_serial_hangup(void *private_) 750static void do_serial_hangup(struct work_struct *work)
750{ 751{
751 struct esp_struct *info = (struct esp_struct *) private_; 752 struct esp_struct *info =
753 container_of(work, struct esp_struct, tqueue_hangup);
752 struct tty_struct *tty; 754 struct tty_struct *tty;
753 755
754 tty = info->tty; 756 tty = info->tty;
@@ -2501,8 +2503,8 @@ static int __init espserial_init(void)
2501 info->magic = ESP_MAGIC; 2503 info->magic = ESP_MAGIC;
2502 info->close_delay = 5*HZ/10; 2504 info->close_delay = 5*HZ/10;
2503 info->closing_wait = 30*HZ; 2505 info->closing_wait = 30*HZ;
2504 INIT_WORK(&info->tqueue, do_softint, info); 2506 INIT_WORK(&info->tqueue, do_softint);
2505 INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info); 2507 INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
2506 info->config.rx_timeout = rx_timeout; 2508 info->config.rx_timeout = rx_timeout;
2507 info->config.flow_on = flow_on; 2509 info->config.flow_on = flow_on;
2508 info->config.flow_off = flow_off; 2510 info->config.flow_off = flow_off;
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 817dc409ac20..23b25ada65ea 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -102,7 +102,7 @@ static void gen_rtc_interrupt(unsigned long arg);
  * Routine to poll RTC seconds field for change as often as possible,
  * after first RTC_UIE use timer to reduce polling
  */
-static void genrtc_troutine(void *data)
+static void genrtc_troutine(struct work_struct *work)
 {
 	unsigned int tmp = get_rtc_ss();
 
@@ -255,7 +255,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit)
 	irq_active = 1;
 	stop_rtc_timers = 0;
 	lostint = 0;
-	INIT_WORK(&genrtc_task, genrtc_troutine, NULL);
+	INIT_WORK(&genrtc_task, genrtc_troutine);
 	oldsecs = get_rtc_ss();
 	init_timer(&timer_task);
 
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 2cf63e7305a3..82a41d5b4ed0 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -69,7 +69,7 @@
 #define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
 
 struct hvsi_struct {
-	struct work_struct writer;
+	struct delayed_work writer;
 	struct work_struct handshaker;
 	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
 	wait_queue_head_t stateq; /* woken when HVSI state changes */
@@ -744,9 +744,10 @@ static int hvsi_handshake(struct hvsi_struct *hp)
 	return 0;
 }
 
-static void hvsi_handshaker(void *arg)
+static void hvsi_handshaker(struct work_struct *work)
 {
-	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+	struct hvsi_struct *hp =
+		container_of(work, struct hvsi_struct, handshaker);
 
 	if (hvsi_handshake(hp) >= 0)
 		return;
@@ -951,9 +952,10 @@ static void hvsi_push(struct hvsi_struct *hp)
 }
 
 /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
-static void hvsi_write_worker(void *arg)
+static void hvsi_write_worker(struct work_struct *work)
 {
-	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+	struct hvsi_struct *hp =
+		container_of(work, struct hvsi_struct, writer.work);
 	unsigned long flags;
 #ifdef DEBUG
 	static long start_j = 0;
@@ -1287,8 +1289,8 @@ static int __init hvsi_console_init(void)
 	}
 
 	hp = &hvsi_ports[hvsi_count];
-	INIT_WORK(&hp->writer, hvsi_write_worker, hp);
-	INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
+	INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+	INIT_WORK(&hp->handshaker, hvsi_handshaker);
 	init_waitqueue_head(&hp->emptyq);
 	init_waitqueue_head(&hp->stateq);
 	spin_lock_init(&hp->lock);
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 54d93f0345e8..c213fdbdb2b0 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -84,8 +84,8 @@ static void iiSendPendingMail(i2eBordStrPtr);
 static void serviceOutgoingFifo(i2eBordStrPtr);
 
 // Functions defined in ip2.c as part of interrupt handling
-static void do_input(void *);
-static void do_status(void *);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 //***************
 //* Debug Data *
@@ -331,8 +331,8 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
 		pCh->ClosingWaitTime = 30*HZ;
 
 		// Initialize task queue objects
-		INIT_WORK(&pCh->tqueue_input, do_input, pCh);
-		INIT_WORK(&pCh->tqueue_status, do_status, pCh);
+		INIT_WORK(&pCh->tqueue_input, do_input);
+		INIT_WORK(&pCh->tqueue_status, do_status);
 
 #ifdef IP2DEBUG_TRACE
 		pCh->trace = ip2trace;
@@ -1573,7 +1573,7 @@ i2StripFifo(i2eBordStrPtr pB)
 #ifdef USE_IQ
 			schedule_work(&pCh->tqueue_input);
 #else
-			do_input(pCh);
+			do_input(&pCh->tqueue_input);
 #endif
 
 			// Note we do not need to maintain any flow-control credits at this
@@ -1810,7 +1810,7 @@ i2StripFifo(i2eBordStrPtr pB)
 #ifdef USE_IQ
 				schedule_work(&pCh->tqueue_status);
 #else
-				do_status(pCh);
+				do_status(&pCh->tqueue_status);
 #endif
 			}
 		}
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index a3f32d46d2f8..cda2459c1d60 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -189,12 +189,12 @@ static int ip2_tiocmset(struct tty_struct *tty, struct file *file,
 		unsigned int set, unsigned int clear);
 
 static void set_irq(int, int);
-static void ip2_interrupt_bh(i2eBordStrPtr pB);
+static void ip2_interrupt_bh(struct work_struct *work);
 static irqreturn_t ip2_interrupt(int irq, void *dev_id);
 static void ip2_poll(unsigned long arg);
 static inline void service_all_boards(void);
-static void do_input(void *p);
-static void do_status(void *p);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 static void ip2_wait_until_sent(PTTY,int);
 
@@ -918,7 +918,7 @@ ip2_init_board( int boardnum )
 		pCh++;
 	}
 ex_exit:
-	INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB);
+	INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
 	return;
 
 err_release_region:
@@ -1125,8 +1125,8 @@ service_all_boards(void)
 
 
 /******************************************************************************/
-/* Function: ip2_interrupt_bh(pB) */
-/* Parameters: pB - pointer to the board structure */
+/* Function: ip2_interrupt_bh(work) */
+/* Parameters: work - pointer to the board structure */
 /* Returns: Nothing */
 /* */
 /* Description: */
@@ -1135,8 +1135,9 @@ service_all_boards(void)
 /* */
 /******************************************************************************/
 static void
-ip2_interrupt_bh(i2eBordStrPtr pB)
+ip2_interrupt_bh(struct work_struct *work)
 {
+	i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
 // pB better well be set or we have a problem! We can only get
 // here from the IMMEDIATE queue. Here, we process the boards.
 // Checking pB doesn't cost much and it saves us from the sanity checkers.
@@ -1245,9 +1246,9 @@ ip2_poll(unsigned long arg)
 	ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
 }
 
-static void do_input(void *p)
+static void do_input(struct work_struct *work)
 {
-	i2ChanStrPtr pCh = p;
+	i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
 	unsigned long flags;
 
 	ip2trace(CHANN, ITRC_INPUT, 21, 0 );
@@ -1279,9 +1280,9 @@ static inline void isig(int sig, struct tty_struct *tty, int flush)
 	}
 }
 
-static void do_status(void *p)
+static void do_status(struct work_struct *work)
 {
-	i2ChanStrPtr pCh = p;
+	i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
 	int status;
 
 	status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 58c955e390b3..1637c1d9a4ba 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -530,9 +530,9 @@ sched_again:
 /* Interrupt handlers */
 
 
-static void isicom_bottomhalf(void *data)
+static void isicom_bottomhalf(struct work_struct *work)
 {
-	struct isi_port *port = (struct isi_port *) data;
+	struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
 	struct tty_struct *tty = port->tty;
 
 	if (!tty)
@@ -1474,9 +1474,9 @@ static void isicom_start(struct tty_struct *tty)
 }
 
 /* hangup et all */
-static void do_isicom_hangup(void *data)
+static void do_isicom_hangup(struct work_struct *work)
 {
-	struct isi_port *port = data;
+	struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
 	struct tty_struct *tty;
 
 	tty = port->tty;
@@ -1966,8 +1966,8 @@ static int __devinit isicom_setup(void)
 			port->channel = channel;
 			port->close_delay = 50 * HZ/100;
 			port->closing_wait = 3000 * HZ/100;
-			INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
-			INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
+			INIT_WORK(&port->hangup_tq, do_isicom_hangup);
+			INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
 			port->status = 0;
 			init_waitqueue_head(&port->open_wait);
 			init_waitqueue_head(&port->close_wait);
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 96cb1f07332b..2d025a9fd14d 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -222,7 +222,7 @@ static struct semaphore moxaBuffSem;
 /*
  * static functions:
  */
-static void do_moxa_softint(void *);
+static void do_moxa_softint(struct work_struct *);
 static int moxa_open(struct tty_struct *, struct file *);
 static void moxa_close(struct tty_struct *, struct file *);
 static int moxa_write(struct tty_struct *, const unsigned char *, int);
@@ -363,7 +363,7 @@ static int __init moxa_init(void)
 	for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
 		ch->type = PORT_16550A;
 		ch->port = i;
-		INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
+		INIT_WORK(&ch->tqueue, do_moxa_softint);
 		ch->tty = NULL;
 		ch->close_delay = 5 * HZ / 10;
 		ch->closing_wait = 30 * HZ;
@@ -509,9 +509,9 @@ static void __exit moxa_exit(void)
 module_init(moxa_init);
 module_exit(moxa_exit);
 
-static void do_moxa_softint(void *private_)
+static void do_moxa_softint(struct work_struct *work)
 {
-	struct moxa_str *ch = (struct moxa_str *) private_;
+	struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
 	struct tty_struct *tty;
 
 	if (ch && (tty = ch->tty)) {
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 048d91142c17..5ed2486b7581 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -389,7 +389,7 @@ static int mxser_init(void);
 /* static void mxser_poll(unsigned long); */
 static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
 static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
-static void mxser_do_softint(void *);
+static void mxser_do_softint(struct work_struct *);
 static int mxser_open(struct tty_struct *, struct file *);
 static void mxser_close(struct tty_struct *, struct file *);
 static int mxser_write(struct tty_struct *, const unsigned char *, int);
@@ -590,7 +590,7 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
 		info->custom_divisor = hwconf->baud_base[i] * 16;
 		info->close_delay = 5 * HZ / 10;
 		info->closing_wait = 30 * HZ;
-		INIT_WORK(&info->tqueue, mxser_do_softint, info);
+		INIT_WORK(&info->tqueue, mxser_do_softint);
 		info->normal_termios = mxvar_sdriver->init_termios;
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
@@ -917,9 +917,10 @@ static int mxser_init(void)
 	return 0;
 }
 
-static void mxser_do_softint(void *private_)
+static void mxser_do_softint(struct work_struct *work)
 {
-	struct mxser_struct *info = private_;
+	struct mxser_struct *info =
+		container_of(work, struct mxser_struct, tqueue);
 	struct tty_struct *tty;
 
 	tty = info->tty;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 50d20aafeb18..211c93fda6fc 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1764,29 +1764,11 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
 	int rc;
 
 	/* read the config-tuples */
-	tuple.DesiredTuple = CISTPL_CONFIG;
 	tuple.Attributes = 0;
 	tuple.TupleData = buf;
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
 
-	if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
-		fail_fn = GetFirstTuple;
-		goto cs_failed;
-	}
-	if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
-		fail_fn = GetTupleData;
-		goto cs_failed;
-	}
-	if ((fail_rc =
-	     pcmcia_parse_tuple(link, &tuple, &parse)) != CS_SUCCESS) {
-		fail_fn = ParseTuple;
-		goto cs_failed;
-	}
-
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
-
 	link->io.BasePort2 = 0;
 	link->io.NumPorts2 = 0;
 	link->io.Attributes2 = 0;
@@ -1841,8 +1823,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
 
 	return 0;
 
-cs_failed:
-	cs_error(link, fail_fn, fail_rc);
cs_release:
 	cm4000_release(link);
 	return -ENODEV;
@@ -1973,14 +1953,14 @@ static int __init cmm_init(void)
 	printk(KERN_INFO "%s\n", version);
 
 	cmm_class = class_create(THIS_MODULE, "cardman_4000");
-	if (!cmm_class)
-		return -1;
+	if (IS_ERR(cmm_class))
+		return PTR_ERR(cmm_class);
 
 	major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
 	if (major < 0) {
 		printk(KERN_WARNING MODULE_NAME
 			": could not get major number\n");
-		return -1;
+		return major;
 	}
 
 	rc = pcmcia_register_driver(&cm4000_driver);
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 55cf4be42976..9b1ff7e8f896 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -523,29 +523,11 @@ static int reader_config(struct pcmcia_device *link, int devno)
 	int fail_fn, fail_rc;
 	int rc;
 
-	tuple.DesiredTuple = CISTPL_CONFIG;
 	tuple.Attributes = 0;
 	tuple.TupleData = buf;
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
 
-	if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
-		fail_fn = GetFirstTuple;
-		goto cs_failed;
-	}
-	if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
-		fail_fn = GetTupleData;
-		goto cs_failed;
-	}
-	if ((fail_rc = pcmcia_parse_tuple(link, &tuple, &parse))
-							!= CS_SUCCESS) {
-		fail_fn = ParseTuple;
-		goto cs_failed;
-	}
-
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
-
 	link->io.BasePort2 = 0;
 	link->io.NumPorts2 = 0;
 	link->io.Attributes2 = 0;
@@ -609,8 +591,6 @@ static int reader_config(struct pcmcia_device *link, int devno)
 
 	return 0;
 
-cs_failed:
-	cs_error(link, fail_fn, fail_rc);
cs_release:
 	reader_release(link);
 	return -ENODEV;
@@ -721,14 +701,14 @@ static int __init cm4040_init(void)
 
 	printk(KERN_INFO "%s\n", version);
 	cmx_class = class_create(THIS_MODULE, "cardman_4040");
-	if (!cmx_class)
-		return -1;
+	if (IS_ERR(cmx_class))
+		return PTR_ERR(cmx_class);
 
 	major = register_chrdev(0, DEVICE_NAME, &reader_fops);
 	if (major < 0) {
 		printk(KERN_WARNING MODULE_NAME
 			": could not get major number\n");
-		return -1;
+		return major;
 	}
 
 	rc = pcmcia_register_driver(&reader_driver);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 1a0bc30b79d1..1bd12296dca5 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -421,7 +421,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id);
 /*
  * Bottom half interrupt handlers
  */
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(MGSLPC_INFO *info);
 static void bh_status(MGSLPC_INFO *info);
 
@@ -547,7 +547,7 @@ static int mgslpc_probe(struct pcmcia_device *link)
 
 	memset(info, 0, sizeof(MGSLPC_INFO));
 	info->magic = MGSLPC_MAGIC;
-	INIT_WORK(&info->task, bh_handler, info);
+	INIT_WORK(&info->task, bh_handler);
 	info->max_frame_size = 4096;
 	info->close_delay = 5*HZ/10;
 	info->closing_wait = 30*HZ;
@@ -604,17 +604,10 @@ static int mgslpc_config(struct pcmcia_device *link)
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("mgslpc_config(0x%p)\n", link);
 
-	/* read CONFIG tuple to find its configuration registers */
-	tuple.DesiredTuple = CISTPL_CONFIG;
 	tuple.Attributes = 0;
 	tuple.TupleData = buf;
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
 
 	/* get CIS configuration entry */
 
@@ -842,9 +835,9 @@ static int bh_action(MGSLPC_INFO *info)
 	return rc;
 }
 
-static void bh_handler(void* Context)
+static void bh_handler(struct work_struct *work)
 {
-	MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
+	MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
 	int action;
 
 	if (!info)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d40df30c2b10..4c6782a1ecdb 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1422,9 +1422,9 @@ static struct keydata {
 
 static unsigned int ip_cnt;
 
-static void rekey_seq_generator(void *private_);
+static void rekey_seq_generator(struct work_struct *work);
 
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
 
 /*
  * Lock avoidance:
@@ -1438,7 +1438,7 @@ static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
  * happen, and even if that happens only a not perfectly compliant
  * ISN is generated, nothing fatal.
  */
-static void rekey_seq_generator(void *private_)
+static void rekey_seq_generator(struct work_struct *work)
 {
 	struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
 
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index c084149153de..fc87070f1866 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -765,7 +765,7 @@ static void sonypi_setbluetoothpower(u8 state)
 	sonypi_device.bluetooth_power = state;
 }
 
-static void input_keyrelease(void *data)
+static void input_keyrelease(struct work_struct *work)
 {
 	struct sonypi_keypress kp;
 
@@ -1412,7 +1412,7 @@ static int __devinit sonypi_probe(struct platform_device *dev)
 			goto err_inpdev_unregister;
 		}
 
-		INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
+		INIT_WORK(&sonypi_device.input_work, input_keyrelease);
 	}
 
 	sonypi_enable(0);
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 7e1bd9562c2a..99137ab66b62 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2261,9 +2261,10 @@ static void sx_start(struct tty_struct * tty)
  * do_sx_hangup() -> tty->hangup() -> sx_hangup()
  *
  */
-static void do_sx_hangup(void *private_)
+static void do_sx_hangup(struct work_struct *work)
 {
-	struct specialix_port *port = (struct specialix_port *) private_;
+	struct specialix_port *port =
+		container_of(work, struct specialix_port, tqueue_hangup);
 	struct tty_struct *tty;
 
 	func_enter();
@@ -2336,9 +2337,10 @@ static void sx_set_termios(struct tty_struct * tty, struct termios * old_termios
 }
 
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-	struct specialix_port *port = (struct specialix_port *) private_;
+	struct specialix_port *port =
+		container_of(work, struct specialix_port, tqueue);
 	struct tty_struct *tty;
 
 	func_enter();
@@ -2411,8 +2413,8 @@ static int sx_init_drivers(void)
 	memset(sx_port, 0, sizeof(sx_port));
 	for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
 		sx_port[i].magic = SPECIALIX_MAGIC;
-		INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]);
-		INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]);
+		INIT_WORK(&sx_port[i].tqueue, do_softint);
+		INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
 		sx_port[i].close_delay = 50 * HZ/100;
 		sx_port[i].closing_wait = 3000 * HZ/100;
 		init_waitqueue_head(&sx_port[i].open_wait);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 06784adcc35c..147c30da81ea 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -802,7 +802,7 @@ static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, u
 /*
  * Bottom half interrupt handlers
  */
-static void mgsl_bh_handler(void* Context);
+static void mgsl_bh_handler(struct work_struct *work);
 static void mgsl_bh_receive(struct mgsl_struct *info);
 static void mgsl_bh_transmit(struct mgsl_struct *info);
 static void mgsl_bh_status(struct mgsl_struct *info);
@@ -1071,9 +1071,10 @@ static int mgsl_bh_action(struct mgsl_struct *info)
 /*
  * Perform bottom half processing of work items queued by ISR.
  */
-static void mgsl_bh_handler(void* Context)
+static void mgsl_bh_handler(struct work_struct *work)
 {
-	struct mgsl_struct *info = (struct mgsl_struct*)Context;
+	struct mgsl_struct *info =
+		container_of(work, struct mgsl_struct, task);
 	int action;
 
 	if (!info)
@@ -4337,7 +4338,7 @@ static struct mgsl_struct* mgsl_allocate_device(void)
 	} else {
 		memset(info, 0, sizeof(struct mgsl_struct));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, mgsl_bh_handler, info);
+		INIT_WORK(&info->task, mgsl_bh_handler);
 		info->max_frame_size = 4096;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index d4334c79f8d4..07f34d43dc7f 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -485,7 +485,7 @@ static void enable_loopback(struct slgt_info *info);
 static void set_rate(struct slgt_info *info, u32 data_rate);
 
 static int bh_action(struct slgt_info *info);
-static void bh_handler(void* context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(struct slgt_info *info);
 static void isr_serial(struct slgt_info *info);
 static void isr_rdma(struct slgt_info *info);
@@ -1878,9 +1878,9 @@ static int bh_action(struct slgt_info *info)
 /*
  * perform bottom half processing
  */
-static void bh_handler(void* context)
+static void bh_handler(struct work_struct *work)
 {
-	struct slgt_info *info = context;
+	struct slgt_info *info = container_of(work, struct slgt_info, task);
 	int action;
 
 	if (!info)
@@ -3326,7 +3326,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
 	} else {
 		memset(info, 0, sizeof(struct slgt_info));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, bh_handler, info);
+		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
 		info->raw_rx_size = DMABUFSIZE;
 		info->close_delay = 5*HZ/10;
@@ -4799,6 +4799,6 @@ static void rx_timeout(unsigned long context)
 	spin_lock_irqsave(&info->lock, flags);
 	info->pending_bh |= BH_RECEIVE;
 	spin_unlock_irqrestore(&info->lock, flags);
-	bh_handler(info);
+	bh_handler(&info->task);
 }
 
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 3e932b681371..13a57245cf2e 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -602,7 +602,7 @@ static void enable_loopback(SLMP_INFO *info, int enable);
 static void set_rate(SLMP_INFO *info, u32 data_rate);
 
 static int bh_action(SLMP_INFO *info);
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_receive(SLMP_INFO *info);
 static void bh_transmit(SLMP_INFO *info);
 static void bh_status(SLMP_INFO *info);
@@ -2063,9 +2063,9 @@ int bh_action(SLMP_INFO *info)
 
 /* Perform bottom half processing of work items queued by ISR.
  */
-void bh_handler(void* Context)
+void bh_handler(struct work_struct *work)
 {
-	SLMP_INFO *info = (SLMP_INFO*)Context;
+	SLMP_INFO *info = container_of(work, SLMP_INFO, task);
 	int action;
 
 	if (!info)
@@ -3805,7 +3805,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
 	} else {
 		memset(info, 0, sizeof(SLMP_INFO));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, bh_handler, info);
+		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5f49280779fb..c64f5bcff947 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -219,13 +219,13 @@ static struct sysrq_key_op sysrq_term_op = {
 	.enable_mask = SYSRQ_ENABLE_SIGNAL,
 };
 
-static void moom_callback(void *ignored)
+static void moom_callback(struct work_struct *ignored)
 {
 	out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
 			GFP_KERNEL, 0);
 }
 
-static DECLARE_WORK(moom_work, moom_callback, NULL);
+static DECLARE_WORK(moom_work, moom_callback);
 
 static void sysrq_handle_moom(int key, struct tty_struct *tty)
 {
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6e1329d404d2..774fa861169a 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -325,9 +325,9 @@ static void user_reader_timeout(unsigned long ptr)
 	schedule_work(&chip->work);
 }
 
-static void timeout_work(void *ptr)
+static void timeout_work(struct work_struct *work)
 {
-	struct tpm_chip *chip = ptr;
+	struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
 
 	down(&chip->buffer_mutex);
 	atomic_set(&chip->data_pending, 0);
@@ -1105,7 +1105,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
 	init_MUTEX(&chip->tpm_mutex);
 	INIT_LIST_HEAD(&chip->list);
 
-	INIT_WORK(&chip->work, timeout_work, chip);
+	INIT_WORK(&chip->work, timeout_work);
 
 	init_timer(&chip->user_read_timer);
 	chip->user_read_timer.function = user_reader_timeout;
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 50dc49205a23..b3cfc8bc613c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
 
 /**
  * do_tty_hangup - actual handler for hangup events
- * @data: tty device
+ * @work: tty device
  *
  * This can be called by the "eventd" kernel thread. That is process
  * synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
  * tasklist_lock to walk task list for hangup event
  *
  */
-static void do_tty_hangup(void *data)
+static void do_tty_hangup(struct work_struct *work)
 {
-	struct tty_struct *tty = (struct tty_struct *) data;
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
 	struct file * cons_filp = NULL;
 	struct file *filp, *f = NULL;
 	struct task_struct *p;
@@ -1433,7 +1434,7 @@ void tty_vhangup(struct tty_struct * tty)
 
 	printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
 #endif
-	do_tty_hangup((void *) tty);
+	do_tty_hangup(&tty->hangup_work);
 }
 EXPORT_SYMBOL(tty_vhangup);
 
@@ -3304,12 +3305,13 @@ int tty_ioctl(struct inode * inode, struct file * file,
  * Nasty bug: do_SAK is being called in interrupt context. This can
  * deadlock. We punt it up to process context. AKPM - 16Mar2001
  */
-static void __do_SAK(void *arg)
+static void __do_SAK(struct work_struct *work)
 {
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, SAK_work);
 #ifdef TTY_SOFT_SAK
 	tty_hangup(tty);
 #else
-	struct tty_struct *tty = arg;
 	struct task_struct *g, *p;
 	int session;
 	int i;
@@ -3388,7 +3390,7 @@ void do_SAK(struct tty_struct *tty)
 {
 	if (!tty)
 		return;
-	PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+	PREPARE_WORK(&tty->SAK_work, __do_SAK);
 	schedule_work(&tty->SAK_work);
 }
 
@@ -3396,7 +3398,7 @@ EXPORT_SYMBOL(do_SAK);
 
 /**
  * flush_to_ldisc
- * @private_: tty structure passed from work queue.
+ * @work: tty structure passed from work queue.
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@ EXPORT_SYMBOL(do_SAK);
 * receive_buf method is single threaded for each tty instance.
 */
 
-static void flush_to_ldisc(void *private_)
+static void flush_to_ldisc(struct work_struct *work)
 {
-	struct tty_struct *tty = (struct tty_struct *) private_;
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, buf.work.work);
 	unsigned long flags;
 	struct tty_ldisc *disc;
 	struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@ void tty_flip_buffer_push(struct tty_struct *tty)
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 
 	if (tty->low_latency)
-		flush_to_ldisc((void *) tty);
+		flush_to_ldisc(&tty->buf.work.work);
 	else
 		schedule_delayed_work(&tty->buf.work, 1);
 }
@@ -3580,17 +3583,17 @@ static void initialize_tty_struct(struct tty_struct *tty)
 	tty->overrun_time = jiffies;
 	tty->buf.head = tty->buf.tail = NULL;
 	tty_buffer_init(tty);
-	INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
 	init_MUTEX(&tty->buf.pty_sem);
 	mutex_init(&tty->termios_mutex);
 	init_waitqueue_head(&tty->write_wait);
 	init_waitqueue_head(&tty->read_wait);
-	INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+	INIT_WORK(&tty->hangup_work, do_tty_hangup);
 	mutex_init(&tty->atomic_read_lock);
 	mutex_init(&tty->atomic_write_lock);
 	spin_lock_init(&tty->read_lock);
 	INIT_LIST_HEAD(&tty->tty_files);
-	INIT_WORK(&tty->SAK_work, NULL, NULL);
+	INIT_WORK(&tty->SAK_work, NULL);
 }
 
 /*
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 87587b4385ab..75ff0286e1ad 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -155,7 +155,7 @@ static void con_flush_chars(struct tty_struct *tty);
 static void set_vesa_blanking(char __user *p);
 static void set_cursor(struct vc_data *vc);
 static void hide_cursor(struct vc_data *vc);
-static void console_callback(void *ignored);
+static void console_callback(struct work_struct *ignored);
 static void blank_screen_t(unsigned long dummy);
 static void set_palette(struct vc_data *vc);
 
@@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
 static int blankinterval = 10*60*HZ;
 static int vesa_off_interval;
 
-static DECLARE_WORK(console_work, console_callback, NULL);
+static DECLARE_WORK(console_work, console_callback);
 
 /*
  * fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@ out:
  * with other console code and prevention of re-entrancy is
  * ensured with console_sem.
  */
-static void console_callback(void *ignored)
+static void console_callback(struct work_struct *ignored)
 {
 	acquire_console_sem();
 
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 05f8ce2cfb4a..b418b16e910e 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,9 +31,11 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(struct work_struct *work)
 {
-	struct cn_callback_data *d = data;
+	struct cn_callback_entry *cbq =
+		container_of(work, struct cn_callback_entry, work.work);
+	struct cn_callback_data *d = &cbq->data;
 
 	d->callback(d->callback_priv);
 
@@ -57,7 +59,7 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc
 	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
 	cbq->data.callback = callback;
 
-	INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
+	INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
 	return cbq;
 }
 
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index b49bacfd8de8..5e7cd45d10ee 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -135,40 +135,39 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
 		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-			if (likely(!test_bit(0, &__cbq->work.pending) &&
+			if (likely(!test_bit(WORK_STRUCT_PENDING,
+					&__cbq->work.work.management) &&
 					__cbq->data.ddata == NULL)) {
 				__cbq->data.callback_priv = msg;
 
 				__cbq->data.ddata = data;
 				__cbq->data.destruct_data = destruct_data;
 
-				if (queue_work(dev->cbdev->cn_queue,
-						&__cbq->work))
+				if (queue_delayed_work(
+						dev->cbdev->cn_queue,
+						&__cbq->work, 0))
 					err = 0;
 			} else {
-				struct work_struct *w;
 				struct cn_callback_data *d;
 
-				w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
-				if (w) {
-					d = (struct cn_callback_data *)(w+1);
-
+				__cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
+				if (__cbq) {
+					d = &__cbq->data;
 					d->callback_priv = msg;
 					d->callback = __cbq->data.callback;
 					d->ddata = data;
 					d->destruct_data = destruct_data;
-					d->free = w;
+					d->free = __cbq;
 
-					INIT_LIST_HEAD(&w->entry);
-					w->pending = 0;
-					w->func = &cn_queue_wrapper;
-					w->data = d;
-					init_timer(&w->timer);
+					INIT_DELAYED_WORK(&__cbq->work,
+							&cn_queue_wrapper);
 
-					if (queue_work(dev->cbdev->cn_queue, w))
+					if (queue_delayed_work(
+							dev->cbdev->cn_queue,
+							&__cbq->work, 0))
 						err = 0;
 					else {
-						kfree(w);
+						kfree(__cbq);
 						err = -EINVAL;
 					}
 				} else
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index dd0c2623e27b..7a7c6e6dfe4f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
-static void handle_update(void *data);
+static void handle_update(struct work_struct *work);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	mutex_init(&policy->lock);
 	mutex_lock(&policy->lock);
 	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+	INIT_WORK(&policy->update, handle_update);
 
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
 {
-	unsigned int cpu = (unsigned int)(long)data;
+	struct cpufreq_policy *policy =
+		container_of(work, struct cpufreq_policy, update);
+	unsigned int cpu = policy->cpu;
 	dprintk("handle_update for cpu %u called\n", cpu);
 	cpufreq_update_policy(cpu);
 }
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c4c578defabf..5ef5ede5b884 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -59,7 +59,7 @@ static unsigned int def_sampling_rate;
 #define MAX_SAMPLING_DOWN_FACTOR (10)
 #define TRANSITION_LATENCY_LIMIT (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
 	struct cpufreq_policy *cur_policy;
@@ -82,7 +82,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
 	unsigned int sampling_rate;
@@ -420,7 +420,7 @@ static void dbs_check_cpu(int cpu)
 	}
 }
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
 	int i;
 	lock_cpu_hotplug();
@@ -435,7 +435,6 @@ static void do_dbs_timer(void *data)
 
 static inline void dbs_timer_init(void)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bf8aa45d4f01..e1cc5113c2ae 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,13 +47,17 @@ static unsigned int def_sampling_rate;
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
 #define TRANSITION_LATENCY_LIMIT (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
-	struct work_struct work;
+	struct delayed_work work;
+	enum dbs_sample sample_type;
 	unsigned int enable;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
@@ -407,30 +411,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	}
 }
 
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
 	unsigned int cpu = smp_processor_id();
 	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	enum dbs_sample sample_type = dbs_info->sample_type;
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+	/* Permit rescheduling of this work item */
+	work_release(work);
+
 	delay -= jiffies % delay;
 
 	if (!dbs_info->enable)
 		return;
 	/* Common NORMAL_SAMPLE setup */
-	INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	if (!dbs_tuners_ins.powersave_bias ||
-	    (unsigned long) data == DBS_NORMAL_SAMPLE) {
+	    sample_type == DBS_NORMAL_SAMPLE) {
 		lock_cpu_hotplug();
 		dbs_check_cpu(dbs_info);
 		unlock_cpu_hotplug();
 		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			INIT_WORK(&dbs_info->work, do_dbs_timer,
-					(void *)DBS_SUB_SAMPLE);
+			dbs_info->sample_type = DBS_SUB_SAMPLE;
 			delay = dbs_info->freq_hi_jiffies;
 		}
 	} else {
@@ -449,7 +454,8 @@ static inline void dbs_timer_init(unsigned int cpu)
 	delay -= jiffies % delay;
 
 	ondemand_powersave_bias_init();
-	INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
index 4630f1969a09..15edf40828b4 100644
--- a/drivers/i2c/chips/ds1374.c
+++ b/drivers/i2c/chips/ds1374.c
@@ -140,12 +140,14 @@ ulong ds1374_get_rtc_time(void)
 	return t1;
 }
 
-static void ds1374_set_work(void *arg)
+static ulong new_time;
+
+static void ds1374_set_work(struct work_struct *work)
 {
 	ulong t1, t2;
 	int limit = 10; /* arbitrary retry limit */
 
-	t1 = *(ulong *) arg;
+	t1 = new_time;
 
 	mutex_lock(&ds1374_mutex);
 
@@ -167,11 +169,9 @@ static void ds1374_set_work(void *arg)
 			"can't confirm time set from rtc chip\n");
 }
 
-static ulong new_time;
-
 static struct workqueue_struct *ds1374_workqueue;
 
-static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);
+static DECLARE_WORK(ds1374_work, ds1374_set_work);
 
 int ds1374_set_rtc_time(ulong nowtime)
 {
@@ -180,7 +180,7 @@ int ds1374_set_rtc_time(ulong nowtime)
 	if (in_interrupt())
 		queue_work(ds1374_workqueue, &ds1374_work);
 	else
-		ds1374_set_work(&new_time);
+		ds1374_set_work(NULL);
 
 	return 0;
 }
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index bef4759f70e5..7efd28ac21ed 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -192,20 +192,10 @@ static int ide_config(struct pcmcia_device *link)
192 tuple.TupleOffset = 0; 192 tuple.TupleOffset = 0;
193 tuple.TupleDataMax = 255; 193 tuple.TupleDataMax = 255;
194 tuple.Attributes = 0; 194 tuple.Attributes = 0;
195 tuple.DesiredTuple = CISTPL_CONFIG; 195
196 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 196 is_kme = ((link->manf_id == MANFID_KME) &&
197 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 197 ((link->card_id == PRODID_KME_KXLC005_A) ||
198 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &stk->parse)); 198 (link->card_id == PRODID_KME_KXLC005_B)));
199 link->conf.ConfigBase = stk->parse.config.base;
200 link->conf.Present = stk->parse.config.rmask[0];
201
202 tuple.DesiredTuple = CISTPL_MANFID;
203 if (!pcmcia_get_first_tuple(link, &tuple) &&
204 !pcmcia_get_tuple_data(link, &tuple) &&
205 !pcmcia_parse_tuple(link, &tuple, &stk->parse))
206 is_kme = ((stk->parse.manfid.manf == MANFID_KME) &&
207 ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) ||
208 (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
209 199
210 /* Not sure if this is right... look up the current Vcc */ 200 /* Not sure if this is right... look up the current Vcc */
211 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &stk->conf)); 201 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &stk->conf));
@@ -408,8 +398,10 @@ static struct pcmcia_device_id ide_ids[] = {
408 PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), 398 PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
409 PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), 399 PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
410 PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443), 400 PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
401 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
411 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), 402 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
412 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), 403 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
404 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
413 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 405 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
414 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), 406 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
415 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), 407 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
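
[Note] The ide-cs hunks are a separate cleanup in this diff: rather than walking the CISTPL_CONFIG and CISTPL_MANFID tuples by hand, the driver reads the IDs the PCMCIA core has already parsed into struct pcmcia_device (manf_id, card_id, prod_id[]). A rough sketch of that style of check; MY_MANF_ID and MY_CARD_ID are invented values, not real product codes:

#include <linux/kernel.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

#define MY_MANF_ID	0x0032
#define MY_CARD_ID	0x0904

static int my_card_is_quirky(struct pcmcia_device *link)
{
	/* the PCMCIA core fills these in from the CIS before config() runs */
	if (link->prod_id[1])
		printk(KERN_INFO "sketch: product \"%s\"\n", link->prod_id[1]);

	return (link->manf_id == MY_MANF_ID) &&
	       (link->card_id == MY_CARD_ID);
}
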
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index d90a3a1898c0..8f4378a1631c 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -31,9 +31,10 @@
31#include "config_roms.h" 31#include "config_roms.h"
32 32
33 33
34static void delayed_reset_bus(void * __reset_info) 34static void delayed_reset_bus(struct work_struct *work)
35{ 35{
36 struct hpsb_host *host = (struct hpsb_host*)__reset_info; 36 struct hpsb_host *host =
37 container_of(work, struct hpsb_host, delayed_reset.work);
37 int generation = host->csr.generation + 1; 38 int generation = host->csr.generation + 1;
38 39
39 /* The generation field rolls over to 2 rather than 0 per IEEE 40 /* The generation field rolls over to 2 rather than 0 per IEEE
@@ -145,7 +146,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
145 146
146 atomic_set(&h->generation, 0); 147 atomic_set(&h->generation, 0);
147 148
148 INIT_WORK(&h->delayed_reset, delayed_reset_bus, h); 149 INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
149 150
150 init_timer(&h->timeout); 151 init_timer(&h->timeout);
151 h->timeout.data = (unsigned long) h; 152 h->timeout.data = (unsigned long) h;
@@ -234,7 +235,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
234 * Config ROM in the near future. */ 235 * Config ROM in the near future. */
235 reset_delay = HZ; 236 reset_delay = HZ;
236 237
237 PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host); 238 PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
238 schedule_delayed_work(&host->delayed_reset, reset_delay); 239 schedule_delayed_work(&host->delayed_reset, reset_delay);
239 240
240 return 0; 241 return 0;
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index bc6dbfadb891..d553e38c9543 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -62,7 +62,7 @@ struct hpsb_host {
62 struct class_device class_dev; 62 struct class_device class_dev;
63 63
64 int update_config_rom; 64 int update_config_rom;
65 struct work_struct delayed_reset; 65 struct delayed_work delayed_reset;
66 unsigned int config_roms; 66 unsigned int config_roms;
67 67
68 struct list_head addr_space; 68 struct list_head addr_space;
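
[Note] Because delayed_reset is queued with schedule_delayed_work(), it becomes a struct delayed_work under the new API, and the handler must name the embedded .work member when it recovers its container. A hedged sketch of that pattern, with a made-up my_host structure standing in for hpsb_host:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_host {
	int generation;
	struct delayed_work delayed_reset;	/* was: struct work_struct */
};

static void my_delayed_reset(struct work_struct *work)
{
	/* the callback is handed &delayed_reset.work, so container_of()
	 * goes through the embedded member, not the delayed_work itself */
	struct my_host *host =
		container_of(work, struct my_host, delayed_reset.work);

	host->generation++;
}

static void my_schedule_reset(struct my_host *host)
{
	INIT_DELAYED_WORK(&host->delayed_reset, my_delayed_reset);
	schedule_delayed_work(&host->delayed_reset, HZ);	/* about one second */
}
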
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 6986ac188281..cd156d4e779e 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -493,20 +493,25 @@ static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id,
493 scsi_unblock_requests(scsi_id->scsi_host); 493 scsi_unblock_requests(scsi_id->scsi_host);
494} 494}
495 495
496static void sbp2util_write_orb_pointer(void *p) 496static void sbp2util_write_orb_pointer(struct work_struct *work)
497{ 497{
498 struct scsi_id_instance_data *scsi_id =
499 container_of(work, struct scsi_id_instance_data,
500 protocol_work.work);
498 quadlet_t data[2]; 501 quadlet_t data[2];
499 502
500 data[0] = ORB_SET_NODE_ID( 503 data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
501 ((struct scsi_id_instance_data *)p)->hi->host->node_id); 504 data[1] = scsi_id->last_orb_dma;
502 data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
503 sbp2util_cpu_to_be32_buffer(data, 8); 505 sbp2util_cpu_to_be32_buffer(data, 8);
504 sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8); 506 sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
505} 507}
506 508
507static void sbp2util_write_doorbell(void *p) 509static void sbp2util_write_doorbell(struct work_struct *work)
508{ 510{
509 sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4); 511 struct scsi_id_instance_data *scsi_id =
512 container_of(work, struct scsi_id_instance_data,
513 protocol_work.work);
514 sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
510} 515}
511 516
512/* 517/*
@@ -843,7 +848,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
843 INIT_LIST_HEAD(&scsi_id->scsi_list); 848 INIT_LIST_HEAD(&scsi_id->scsi_list);
844 spin_lock_init(&scsi_id->sbp2_command_orb_lock); 849 spin_lock_init(&scsi_id->sbp2_command_orb_lock);
845 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); 850 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
846 INIT_WORK(&scsi_id->protocol_work, NULL, NULL); 851 INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
847 852
848 ud->device.driver_data = scsi_id; 853 ud->device.driver_data = scsi_id;
849 854
@@ -2047,11 +2052,10 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2047 * We do not accept new commands until the job is over. 2052 * We do not accept new commands until the job is over.
2048 */ 2053 */
2049 scsi_block_requests(scsi_id->scsi_host); 2054 scsi_block_requests(scsi_id->scsi_host);
2050 PREPARE_WORK(&scsi_id->protocol_work, 2055 PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
2051 last_orb ? sbp2util_write_doorbell: 2056 last_orb ? sbp2util_write_doorbell:
2052 sbp2util_write_orb_pointer, 2057 sbp2util_write_orb_pointer);
2053 scsi_id); 2058 schedule_delayed_work(&scsi_id->protocol_work, 0);
2054 schedule_work(&scsi_id->protocol_work);
2055 } 2059 }
2056} 2060}
2057 2061
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index abbe48e646c3..1b16d6b9cf11 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -348,7 +348,7 @@ struct scsi_id_instance_data {
348 unsigned workarounds; 348 unsigned workarounds;
349 349
350 atomic_t state; 350 atomic_t state;
351 struct work_struct protocol_work; 351 struct delayed_work protocol_work;
352}; 352};
353 353
354/* For use in scsi_id_instance_data.state */ 354/* For use in scsi_id_instance_data.state */
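
[Note] sbp2 reuses one work item for two different callbacks, so the conversion keeps PREPARE_DELAYED_WORK() to swap the handler at run time and replaces schedule_work() with schedule_delayed_work(..., 0), which queues a delayed_work for immediate execution. Roughly, with placeholder my_* names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_lu {
	struct delayed_work protocol_work;
};

static void my_write_orb_pointer(struct work_struct *work)
{
	struct my_lu *lu =
		container_of(work, struct my_lu, protocol_work.work);

	pr_debug("writing ORB pointer for %p\n", lu);
}

static void my_write_doorbell(struct work_struct *work)
{
	struct my_lu *lu =
		container_of(work, struct my_lu, protocol_work.work);

	pr_debug("ringing doorbell for %p\n", lu);
}

static void my_kick_fetch_agent(struct my_lu *lu, int use_doorbell)
{
	/* choose the handler now, then queue with zero delay */
	PREPARE_DELAYED_WORK(&lu->protocol_work,
			     use_doorbell ? my_write_doorbell
					  : my_write_orb_pointer);
	schedule_delayed_work(&lu->protocol_work, 0);
}
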
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 7767a11b6890..af939796750d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -55,11 +55,11 @@ struct addr_req {
55 int status; 55 int status;
56}; 56};
57 57
58static void process_req(void *data); 58static void process_req(struct work_struct *work);
59 59
60static DEFINE_MUTEX(lock); 60static DEFINE_MUTEX(lock);
61static LIST_HEAD(req_list); 61static LIST_HEAD(req_list);
62static DECLARE_WORK(work, process_req, NULL); 62static DECLARE_DELAYED_WORK(work, process_req);
63static struct workqueue_struct *addr_wq; 63static struct workqueue_struct *addr_wq;
64 64
65void rdma_addr_register_client(struct rdma_addr_client *client) 65void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -215,7 +215,7 @@ out:
215 return ret; 215 return ret;
216} 216}
217 217
218static void process_req(void *data) 218static void process_req(struct work_struct *work)
219{ 219{
220 struct addr_req *req, *temp_req; 220 struct addr_req *req, *temp_req;
221 struct sockaddr_in *src_in, *dst_in; 221 struct sockaddr_in *src_in, *dst_in;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64e67a6..98272fbbfb31 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@ err:
285 kfree(tprops); 285 kfree(tprops);
286} 286}
287 287
288static void ib_cache_task(void *work_ptr) 288static void ib_cache_task(struct work_struct *_work)
289{ 289{
290 struct ib_update_work *work = work_ptr; 290 struct ib_update_work *work =
291 container_of(_work, struct ib_update_work, work);
291 292
292 ib_cache_update(work->device, work->port_num); 293 ib_cache_update(work->device, work->port_num);
293 kfree(work); 294 kfree(work);
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
306 event->event == IB_EVENT_CLIENT_REREGISTER) { 307 event->event == IB_EVENT_CLIENT_REREGISTER) {
307 work = kmalloc(sizeof *work, GFP_ATOMIC); 308 work = kmalloc(sizeof *work, GFP_ATOMIC);
308 if (work) { 309 if (work) {
309 INIT_WORK(&work->work, ib_cache_task, work); 310 INIT_WORK(&work->work, ib_cache_task);
310 work->device = event->device; 311 work->device = event->device;
311 work->port_num = event->element.port_num; 312 work->port_num = event->element.port_num;
312 schedule_work(&work->work); 313 schedule_work(&work->work);
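
[Note] ib_cache allocates a fresh work item per event, so the context that used to travel through the third INIT_WORK() argument is simply the structure the work_struct is embedded in; the handler recovers it with container_of() and frees it when done. A minimal sketch of that one-shot pattern (my_* names are illustrative):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_update {
	struct work_struct work;	/* must be embedded in the object */
	int port_num;
};

static void my_update_task(struct work_struct *_work)
{
	struct my_update *up = container_of(_work, struct my_update, work);

	pr_debug("updating cache for port %d\n", up->port_num);
	kfree(up);			/* one-shot: the handler frees it */
}

static int my_queue_update(int port_num)
{
	struct my_update *up = kmalloc(sizeof(*up), GFP_ATOMIC);

	if (!up)
		return -ENOMEM;
	INIT_WORK(&up->work, my_update_task);	/* no context argument */
	up->port_num = port_num;
	schedule_work(&up->work);
	return 0;
}
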
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e5dc4530808a..79c937bf6962 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@ struct cm_av {
101}; 101};
102 102
103struct cm_work { 103struct cm_work {
104 struct work_struct work; 104 struct delayed_work work;
105 struct list_head list; 105 struct list_head list;
106 struct cm_port *port; 106 struct cm_port *port;
107 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 107 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
@@ -161,7 +161,7 @@ struct cm_id_private {
161 atomic_t work_count; 161 atomic_t work_count;
162}; 162};
163 163
164static void cm_work_handler(void *data); 164static void cm_work_handler(struct work_struct *work);
165 165
166static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 166static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
167{ 167{
@@ -668,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
668 return ERR_PTR(-ENOMEM); 668 return ERR_PTR(-ENOMEM);
669 669
670 timewait_info->work.local_id = local_id; 670 timewait_info->work.local_id = local_id;
671 INIT_WORK(&timewait_info->work.work, cm_work_handler, 671 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
672 &timewait_info->work);
673 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; 672 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
674 return timewait_info; 673 return timewait_info;
675} 674}
@@ -2995,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
2995 } 2994 }
2996} 2995}
2997 2996
2998static void cm_work_handler(void *data) 2997static void cm_work_handler(struct work_struct *_work)
2999{ 2998{
3000 struct cm_work *work = data; 2999 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3001 int ret; 3000 int ret;
3002 3001
3003 switch (work->cm_event.event) { 3002 switch (work->cm_event.event) {
@@ -3087,12 +3086,12 @@ static int cm_establish(struct ib_cm_id *cm_id)
3087 * we need to find the cm_id once we're in the context of the 3086 * we need to find the cm_id once we're in the context of the
3088 * worker thread, rather than holding a reference on it. 3087 * worker thread, rather than holding a reference on it.
3089 */ 3088 */
3090 INIT_WORK(&work->work, cm_work_handler, work); 3089 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3091 work->local_id = cm_id->local_id; 3090 work->local_id = cm_id->local_id;
3092 work->remote_id = cm_id->remote_id; 3091 work->remote_id = cm_id->remote_id;
3093 work->mad_recv_wc = NULL; 3092 work->mad_recv_wc = NULL;
3094 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3093 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3095 queue_work(cm.wq, &work->work); 3094 queue_delayed_work(cm.wq, &work->work, 0);
3096out: 3095out:
3097 return ret; 3096 return ret;
3098} 3097}
@@ -3191,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3191 return; 3190 return;
3192 } 3191 }
3193 3192
3194 INIT_WORK(&work->work, cm_work_handler, work); 3193 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3195 work->cm_event.event = event; 3194 work->cm_event.event = event;
3196 work->mad_recv_wc = mad_recv_wc; 3195 work->mad_recv_wc = mad_recv_wc;
3197 work->port = (struct cm_port *)mad_agent->context; 3196 work->port = (struct cm_port *)mad_agent->context;
3198 queue_work(cm.wq, &work->work); 3197 queue_delayed_work(cm.wq, &work->work, 0);
3199} 3198}
3200 3199
3201static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3200static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cf48f2697434..985a6b564d8f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1340,9 +1340,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1340 return (id_priv->query_id < 0) ? id_priv->query_id : 0; 1340 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
1341} 1341}
1342 1342
1343static void cma_work_handler(void *data) 1343static void cma_work_handler(struct work_struct *_work)
1344{ 1344{
1345 struct cma_work *work = data; 1345 struct cma_work *work = container_of(_work, struct cma_work, work);
1346 struct rdma_id_private *id_priv = work->id; 1346 struct rdma_id_private *id_priv = work->id;
1347 int destroy = 0; 1347 int destroy = 0;
1348 1348
@@ -1373,7 +1373,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1373 return -ENOMEM; 1373 return -ENOMEM;
1374 1374
1375 work->id = id_priv; 1375 work->id = id_priv;
1376 INIT_WORK(&work->work, cma_work_handler, work); 1376 INIT_WORK(&work->work, cma_work_handler);
1377 work->old_state = CMA_ROUTE_QUERY; 1377 work->old_state = CMA_ROUTE_QUERY;
1378 work->new_state = CMA_ROUTE_RESOLVED; 1378 work->new_state = CMA_ROUTE_RESOLVED;
1379 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1379 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1430,7 +1430,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1430 return -ENOMEM; 1430 return -ENOMEM;
1431 1431
1432 work->id = id_priv; 1432 work->id = id_priv;
1433 INIT_WORK(&work->work, cma_work_handler, work); 1433 INIT_WORK(&work->work, cma_work_handler);
1434 work->old_state = CMA_ROUTE_QUERY; 1434 work->old_state = CMA_ROUTE_QUERY;
1435 work->new_state = CMA_ROUTE_RESOLVED; 1435 work->new_state = CMA_ROUTE_RESOLVED;
1436 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1436 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1583,7 +1583,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1583 } 1583 }
1584 1584
1585 work->id = id_priv; 1585 work->id = id_priv;
1586 INIT_WORK(&work->work, cma_work_handler, work); 1586 INIT_WORK(&work->work, cma_work_handler);
1587 work->old_state = CMA_ADDR_QUERY; 1587 work->old_state = CMA_ADDR_QUERY;
1588 work->new_state = CMA_ADDR_RESOLVED; 1588 work->new_state = CMA_ADDR_RESOLVED;
1589 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 1589 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index cf797d7aea09..1039ad57d53b 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -828,9 +828,9 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
828 * thread asleep on the destroy_comp list vs. an object destroyed 828 * thread asleep on the destroy_comp list vs. an object destroyed
829 * here synchronously when the last reference is removed. 829 * here synchronously when the last reference is removed.
830 */ 830 */
831static void cm_work_handler(void *arg) 831static void cm_work_handler(struct work_struct *_work)
832{ 832{
833 struct iwcm_work *work = arg; 833 struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
834 struct iw_cm_event levent; 834 struct iw_cm_event levent;
835 struct iwcm_id_private *cm_id_priv = work->cm_id; 835 struct iwcm_id_private *cm_id_priv = work->cm_id;
836 unsigned long flags; 836 unsigned long flags;
@@ -900,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
900 goto out; 900 goto out;
901 } 901 }
902 902
903 INIT_WORK(&work->work, cm_work_handler, work); 903 INIT_WORK(&work->work, cm_work_handler);
904 work->cm_id = cm_id_priv; 904 work->cm_id = cm_id_priv;
905 work->event = *iw_event; 905 work->event = *iw_event;
906 906
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 3f9c16232c4d..15f38d94b3a8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent(
65static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 65static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
66 struct ib_mad_private *mad); 66 struct ib_mad_private *mad);
67static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); 67static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
68static void timeout_sends(void *data); 68static void timeout_sends(struct work_struct *work);
69static void local_completions(void *data); 69static void local_completions(struct work_struct *work);
70static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 70static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
71 struct ib_mad_agent_private *agent_priv, 71 struct ib_mad_agent_private *agent_priv,
72 u8 mgmt_class); 72 u8 mgmt_class);
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
356 INIT_LIST_HEAD(&mad_agent_priv->wait_list); 356 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
357 INIT_LIST_HEAD(&mad_agent_priv->done_list); 357 INIT_LIST_HEAD(&mad_agent_priv->done_list);
358 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); 358 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
359 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); 359 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
360 INIT_LIST_HEAD(&mad_agent_priv->local_list); 360 INIT_LIST_HEAD(&mad_agent_priv->local_list);
361 INIT_WORK(&mad_agent_priv->local_work, local_completions, 361 INIT_WORK(&mad_agent_priv->local_work, local_completions);
362 mad_agent_priv);
363 atomic_set(&mad_agent_priv->refcount, 1); 362 atomic_set(&mad_agent_priv->refcount, 1);
364 init_completion(&mad_agent_priv->comp); 363 init_completion(&mad_agent_priv->comp);
365 364
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
2198/* 2197/*
2199 * IB MAD completion callback 2198 * IB MAD completion callback
2200 */ 2199 */
2201static void ib_mad_completion_handler(void *data) 2200static void ib_mad_completion_handler(struct work_struct *work)
2202{ 2201{
2203 struct ib_mad_port_private *port_priv; 2202 struct ib_mad_port_private *port_priv;
2204 struct ib_wc wc; 2203 struct ib_wc wc;
2205 2204
2206 port_priv = (struct ib_mad_port_private *)data; 2205 port_priv = container_of(work, struct ib_mad_port_private, work);
2207 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 2206 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2208 2207
2209 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { 2208 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2324} 2323}
2325EXPORT_SYMBOL(ib_cancel_mad); 2324EXPORT_SYMBOL(ib_cancel_mad);
2326 2325
2327static void local_completions(void *data) 2326static void local_completions(struct work_struct *work)
2328{ 2327{
2329 struct ib_mad_agent_private *mad_agent_priv; 2328 struct ib_mad_agent_private *mad_agent_priv;
2330 struct ib_mad_local_private *local; 2329 struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@ static void local_completions(void *data)
2334 struct ib_wc wc; 2333 struct ib_wc wc;
2335 struct ib_mad_send_wc mad_send_wc; 2334 struct ib_mad_send_wc mad_send_wc;
2336 2335
2337 mad_agent_priv = (struct ib_mad_agent_private *)data; 2336 mad_agent_priv =
2337 container_of(work, struct ib_mad_agent_private, local_work);
2338 2338
2339 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2339 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2340 while (!list_empty(&mad_agent_priv->local_list)) { 2340 while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2434 return ret; 2434 return ret;
2435} 2435}
2436 2436
2437static void timeout_sends(void *data) 2437static void timeout_sends(struct work_struct *work)
2438{ 2438{
2439 struct ib_mad_agent_private *mad_agent_priv; 2439 struct ib_mad_agent_private *mad_agent_priv;
2440 struct ib_mad_send_wr_private *mad_send_wr; 2440 struct ib_mad_send_wr_private *mad_send_wr;
2441 struct ib_mad_send_wc mad_send_wc; 2441 struct ib_mad_send_wc mad_send_wc;
2442 unsigned long flags, delay; 2442 unsigned long flags, delay;
2443 2443
2444 mad_agent_priv = (struct ib_mad_agent_private *)data; 2444 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2445 timed_work.work);
2445 mad_send_wc.vendor_err = 0; 2446 mad_send_wc.vendor_err = 0;
2446 2447
2447 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2448 spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
2799 ret = -ENOMEM; 2800 ret = -ENOMEM;
2800 goto error8; 2801 goto error8;
2801 } 2802 }
2802 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); 2803 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2803 2804
2804 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2805 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2805 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 2806 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d06b59083f6e..d5548e73e068 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -102,7 +102,7 @@ struct ib_mad_agent_private {
102 struct list_head send_list; 102 struct list_head send_list;
103 struct list_head wait_list; 103 struct list_head wait_list;
104 struct list_head done_list; 104 struct list_head done_list;
105 struct work_struct timed_work; 105 struct delayed_work timed_work;
106 unsigned long timeout; 106 unsigned long timeout;
107 struct list_head local_list; 107 struct list_head local_list;
108 struct work_struct local_work; 108 struct work_struct local_work;
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 1ef79d015a1e..3663fd7022be 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -45,8 +45,8 @@ enum rmpp_state {
45struct mad_rmpp_recv { 45struct mad_rmpp_recv {
46 struct ib_mad_agent_private *agent; 46 struct ib_mad_agent_private *agent;
47 struct list_head list; 47 struct list_head list;
48 struct work_struct timeout_work; 48 struct delayed_work timeout_work;
49 struct work_struct cleanup_work; 49 struct delayed_work cleanup_work;
50 struct completion comp; 50 struct completion comp;
51 enum rmpp_state state; 51 enum rmpp_state state;
52 spinlock_t lock; 52 spinlock_t lock;
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent,
233 } 233 }
234} 234}
235 235
236static void recv_timeout_handler(void *data) 236static void recv_timeout_handler(struct work_struct *work)
237{ 237{
238 struct mad_rmpp_recv *rmpp_recv = data; 238 struct mad_rmpp_recv *rmpp_recv =
239 container_of(work, struct mad_rmpp_recv, timeout_work.work);
239 struct ib_mad_recv_wc *rmpp_wc; 240 struct ib_mad_recv_wc *rmpp_wc;
240 unsigned long flags; 241 unsigned long flags;
241 242
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data)
254 ib_free_recv_mad(rmpp_wc); 255 ib_free_recv_mad(rmpp_wc);
255} 256}
256 257
257static void recv_cleanup_handler(void *data) 258static void recv_cleanup_handler(struct work_struct *work)
258{ 259{
259 struct mad_rmpp_recv *rmpp_recv = data; 260 struct mad_rmpp_recv *rmpp_recv =
261 container_of(work, struct mad_rmpp_recv, cleanup_work.work);
260 unsigned long flags; 262 unsigned long flags;
261 263
262 spin_lock_irqsave(&rmpp_recv->agent->lock, flags); 264 spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
285 287
286 rmpp_recv->agent = agent; 288 rmpp_recv->agent = agent;
287 init_completion(&rmpp_recv->comp); 289 init_completion(&rmpp_recv->comp);
288 INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); 290 INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
289 INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); 291 INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
290 spin_lock_init(&rmpp_recv->lock); 292 spin_lock_init(&rmpp_recv->lock);
291 rmpp_recv->state = RMPP_STATE_ACTIVE; 293 rmpp_recv->state = RMPP_STATE_ACTIVE;
292 atomic_set(&rmpp_recv->refcount, 1); 294 atomic_set(&rmpp_recv->refcount, 1);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1706d3c7e95e..e45afba75341 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref)
360 kfree(sm_ah); 360 kfree(sm_ah);
361} 361}
362 362
363static void update_sm_ah(void *port_ptr) 363static void update_sm_ah(struct work_struct *work)
364{ 364{
365 struct ib_sa_port *port = port_ptr; 365 struct ib_sa_port *port =
366 container_of(work, struct ib_sa_port, update_task);
366 struct ib_sa_sm_ah *new_ah, *old_ah; 367 struct ib_sa_sm_ah *new_ah, *old_ah;
367 struct ib_port_attr port_attr; 368 struct ib_port_attr port_attr;
368 struct ib_ah_attr ah_attr; 369 struct ib_ah_attr ah_attr;
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device)
992 if (IS_ERR(sa_dev->port[i].agent)) 993 if (IS_ERR(sa_dev->port[i].agent))
993 goto err; 994 goto err;
994 995
995 INIT_WORK(&sa_dev->port[i].update_task, 996 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
996 update_sm_ah, &sa_dev->port[i]);
997 } 997 }
998 998
999 ib_set_client_data(device, &sa_client, sa_dev); 999 ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
1010 goto err; 1010 goto err;
1011 1011
1012 for (i = 0; i <= e - s; ++i) 1012 for (i = 0; i <= e - s; ++i)
1013 update_sm_ah(&sa_dev->port[i]); 1013 update_sm_ah(&sa_dev->port[i].update_task);
1014 1014
1015 return; 1015 return;
1016 1016
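
[Note] When a converted handler is also called synchronously, as update_sm_ah() is during ib_sa_add_one(), the caller now passes the address of the embedded work item instead of the old context pointer; container_of() still resolves correctly because the member genuinely lives inside the object. A small sketch under those assumptions (my_* names are invented):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_port {
	int port_num;
	struct work_struct update_task;
};

static void my_update_sm_ah(struct work_struct *work)
{
	struct my_port *port = container_of(work, struct my_port, update_task);

	pr_debug("refreshing SM address handle for port %d\n", port->port_num);
}

static void my_add_ports(struct my_port *ports, int nports)
{
	int i;

	for (i = 0; i < nports; i++) {
		INIT_WORK(&ports[i].update_task, my_update_sm_ah);
		/* a direct call must pass the embedded work item,
		 * not the containing structure */
		my_update_sm_ah(&ports[i].update_task);
	}
}
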
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147dbeb42..db12cc0841df 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
179 up_write(&current->mm->mmap_sem); 179 up_write(&current->mm->mmap_sem);
180} 180}
181 181
182static void ib_umem_account(void *work_ptr) 182static void ib_umem_account(struct work_struct *_work)
183{ 183{
184 struct ib_umem_account_work *work = work_ptr; 184 struct ib_umem_account_work *work =
185 container_of(_work, struct ib_umem_account_work, work);
185 186
186 down_write(&work->mm->mmap_sem); 187 down_write(&work->mm->mmap_sem);
187 work->mm->locked_vm -= work->diff; 188 work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
216 return; 217 return;
217 } 218 }
218 219
219 INIT_WORK(&work->work, ib_umem_account, work); 220 INIT_WORK(&work->work, ib_umem_account);
220 work->mm = mm; 221 work->mm = mm;
221 work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; 222 work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
222 223
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 413754b1d8a2..8536aeb96af8 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -214,9 +214,10 @@ struct ipath_user_pages_work {
214 unsigned long num_pages; 214 unsigned long num_pages;
215}; 215};
216 216
217static void user_pages_account(void *ptr) 217static void user_pages_account(struct work_struct *_work)
218{ 218{
219 struct ipath_user_pages_work *work = ptr; 219 struct ipath_user_pages_work *work =
220 container_of(_work, struct ipath_user_pages_work, work);
220 221
221 down_write(&work->mm->mmap_sem); 222 down_write(&work->mm->mmap_sem);
222 work->mm->locked_vm -= work->num_pages; 223 work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
242 243
243 goto bail; 244 goto bail;
244 245
245 INIT_WORK(&work->work, user_pages_account, work); 246 INIT_WORK(&work->work, user_pages_account);
246 work->mm = mm; 247 work->mm = mm;
247 work->num_pages = num_pages; 248 work->num_pages = num_pages;
248 249
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cd044ea2dfa4..e948158a28d9 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -57,7 +57,7 @@ static int catas_reset_disable;
57module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); 57module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
58MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); 58MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
59 59
60static void catas_reset(void *work_ptr) 60static void catas_reset(struct work_struct *work)
61{ 61{
62 struct mthca_dev *dev, *tmpdev; 62 struct mthca_dev *dev, *tmpdev;
63 LIST_HEAD(tlist); 63 LIST_HEAD(tlist);
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
203 203
204int __init mthca_catas_init(void) 204int __init mthca_catas_init(void)
205{ 205{
206 INIT_WORK(&catas_work, catas_reset, NULL); 206 INIT_WORK(&catas_work, catas_reset);
207 207
208 catas_wq = create_singlethread_workqueue("mthca_catas"); 208 catas_wq = create_singlethread_workqueue("mthca_catas");
209 if (!catas_wq) 209 if (!catas_wq)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index f2b61851a49c..99547996aba2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -136,11 +136,11 @@ struct ipoib_dev_priv {
136 struct list_head multicast_list; 136 struct list_head multicast_list;
137 struct rb_root multicast_tree; 137 struct rb_root multicast_tree;
138 138
139 struct work_struct pkey_task; 139 struct delayed_work pkey_task;
140 struct work_struct mcast_task; 140 struct delayed_work mcast_task;
141 struct work_struct flush_task; 141 struct work_struct flush_task;
142 struct work_struct restart_task; 142 struct work_struct restart_task;
143 struct work_struct ah_reap_task; 143 struct delayed_work ah_reap_task;
144 144
145 struct ib_device *ca; 145 struct ib_device *ca;
146 u8 port; 146 u8 port;
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev);
254 254
255void ipoib_send(struct net_device *dev, struct sk_buff *skb, 255void ipoib_send(struct net_device *dev, struct sk_buff *skb,
256 struct ipoib_ah *address, u32 qpn); 256 struct ipoib_ah *address, u32 qpn);
257void ipoib_reap_ah(void *dev_ptr); 257void ipoib_reap_ah(struct work_struct *work);
258 258
259void ipoib_flush_paths(struct net_device *dev); 259void ipoib_flush_paths(struct net_device *dev);
260struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); 260struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
261 261
262int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 262int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
263void ipoib_ib_dev_flush(void *dev); 263void ipoib_ib_dev_flush(struct work_struct *work);
264void ipoib_ib_dev_cleanup(struct net_device *dev); 264void ipoib_ib_dev_cleanup(struct net_device *dev);
265 265
266int ipoib_ib_dev_open(struct net_device *dev); 266int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev);
271int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 271int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
272void ipoib_dev_cleanup(struct net_device *dev); 272void ipoib_dev_cleanup(struct net_device *dev);
273 273
274void ipoib_mcast_join_task(void *dev_ptr); 274void ipoib_mcast_join_task(struct work_struct *work);
275void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); 275void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
276 276
277void ipoib_mcast_restart_task(void *dev_ptr); 277void ipoib_mcast_restart_task(struct work_struct *work);
278int ipoib_mcast_start_thread(struct net_device *dev); 278int ipoib_mcast_start_thread(struct net_device *dev);
279int ipoib_mcast_stop_thread(struct net_device *dev, int flush); 279int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
280 280
@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler,
312int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); 312int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
313int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); 313int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
314 314
315void ipoib_pkey_poll(void *dev); 315void ipoib_pkey_poll(struct work_struct *work);
316int ipoib_pkey_dev_delay_open(struct net_device *dev); 316int ipoib_pkey_dev_delay_open(struct net_device *dev);
317 317
318#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 318#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8bf5e9ec7c95..f10fba5d3265 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -400,10 +400,11 @@ static void __ipoib_reap_ah(struct net_device *dev)
400 spin_unlock_irq(&priv->tx_lock); 400 spin_unlock_irq(&priv->tx_lock);
401} 401}
402 402
403void ipoib_reap_ah(void *dev_ptr) 403void ipoib_reap_ah(struct work_struct *work)
404{ 404{
405 struct net_device *dev = dev_ptr; 405 struct ipoib_dev_priv *priv =
406 struct ipoib_dev_priv *priv = netdev_priv(dev); 406 container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
407 struct net_device *dev = priv->dev;
407 408
408 __ipoib_reap_ah(dev); 409 __ipoib_reap_ah(dev);
409 410
@@ -613,10 +614,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
613 return 0; 614 return 0;
614} 615}
615 616
616void ipoib_ib_dev_flush(void *_dev) 617void ipoib_ib_dev_flush(struct work_struct *work)
617{ 618{
618 struct net_device *dev = (struct net_device *)_dev; 619 struct ipoib_dev_priv *cpriv, *priv =
619 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv; 620 container_of(work, struct ipoib_dev_priv, flush_task);
621 struct net_device *dev = priv->dev;
620 622
621 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { 623 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
622 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); 624 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +640,14 @@ void ipoib_ib_dev_flush(void *_dev)
638 */ 640 */
639 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { 641 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
640 ipoib_ib_dev_up(dev); 642 ipoib_ib_dev_up(dev);
641 ipoib_mcast_restart_task(dev); 643 ipoib_mcast_restart_task(&priv->restart_task);
642 } 644 }
643 645
644 mutex_lock(&priv->vlan_mutex); 646 mutex_lock(&priv->vlan_mutex);
645 647
646 /* Flush any child interfaces too */ 648 /* Flush any child interfaces too */
647 list_for_each_entry(cpriv, &priv->child_intfs, list) 649 list_for_each_entry(cpriv, &priv->child_intfs, list)
648 ipoib_ib_dev_flush(cpriv->dev); 650 ipoib_ib_dev_flush(&cpriv->flush_task);
649 651
650 mutex_unlock(&priv->vlan_mutex); 652 mutex_unlock(&priv->vlan_mutex);
651} 653}
@@ -672,10 +674,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
672 * change async notification is available. 674 * change async notification is available.
673 */ 675 */
674 676
675void ipoib_pkey_poll(void *dev_ptr) 677void ipoib_pkey_poll(struct work_struct *work)
676{ 678{
677 struct net_device *dev = dev_ptr; 679 struct ipoib_dev_priv *priv =
678 struct ipoib_dev_priv *priv = netdev_priv(dev); 680 container_of(work, struct ipoib_dev_priv, pkey_task.work);
681 struct net_device *dev = priv->dev;
679 682
680 ipoib_pkey_dev_check_presence(dev); 683 ipoib_pkey_dev_check_presence(dev);
681 684
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5ba3154320b4..c09280243726 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -940,11 +940,11 @@ static void ipoib_setup(struct net_device *dev)
940 INIT_LIST_HEAD(&priv->dead_ahs); 940 INIT_LIST_HEAD(&priv->dead_ahs);
941 INIT_LIST_HEAD(&priv->multicast_list); 941 INIT_LIST_HEAD(&priv->multicast_list);
942 942
943 INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); 943 INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
944 INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); 944 INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
945 INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); 945 INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
946 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); 946 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
947 INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); 947 INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
948} 948}
949 949
950struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) 950struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
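
[Note] ipoib keeps several work items in one private structure; those that are ever queued with a delay (pkey_task, mcast_task, ah_reap_task) become delayed_work while the rest stay work_struct, and each handler recovers priv through its own member, adding .work only for the delayed ones. A reduced sketch with a hypothetical my_priv:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
	struct net_device *dev;
	struct delayed_work mcast_task;		/* queued with backoff delays */
	struct work_struct flush_task;		/* always queued immediately */
};

static void my_mcast_join_task(struct work_struct *work)
{
	struct my_priv *priv =
		container_of(work, struct my_priv, mcast_task.work);

	pr_debug("%s: joining multicast groups\n", priv->dev->name);
}

static void my_dev_flush(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, flush_task);

	pr_debug("%s: flushing\n", priv->dev->name);
}

static void my_setup(struct my_priv *priv)
{
	INIT_DELAYED_WORK(&priv->mcast_task, my_mcast_join_task);
	INIT_WORK(&priv->flush_task, my_dev_flush);
	schedule_delayed_work(&priv->mcast_task, 0);	/* "run now" for delayed work */
}
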
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d282d65e3ee0..b04b72ca32ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status,
399 mcast->backoff = 1; 399 mcast->backoff = 1;
400 mutex_lock(&mcast_mutex); 400 mutex_lock(&mcast_mutex);
401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
402 queue_work(ipoib_workqueue, &priv->mcast_task); 402 queue_delayed_work(ipoib_workqueue,
403 &priv->mcast_task, 0);
403 mutex_unlock(&mcast_mutex); 404 mutex_unlock(&mcast_mutex);
404 complete(&mcast->done); 405 complete(&mcast->done);
405 return; 406 return;
@@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status,
435 436
436 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { 437 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
437 if (status == -ETIMEDOUT) 438 if (status == -ETIMEDOUT)
438 queue_work(ipoib_workqueue, &priv->mcast_task); 439 queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
440 0);
439 else 441 else
440 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 442 queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
441 mcast->backoff * HZ); 443 mcast->backoff * HZ);
@@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
517 mcast->query_id = ret; 519 mcast->query_id = ret;
518} 520}
519 521
520void ipoib_mcast_join_task(void *dev_ptr) 522void ipoib_mcast_join_task(struct work_struct *work)
521{ 523{
522 struct net_device *dev = dev_ptr; 524 struct ipoib_dev_priv *priv =
523 struct ipoib_dev_priv *priv = netdev_priv(dev); 525 container_of(work, struct ipoib_dev_priv, mcast_task.work);
526 struct net_device *dev = priv->dev;
524 527
525 if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) 528 if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
526 return; 529 return;
@@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
610 613
611 mutex_lock(&mcast_mutex); 614 mutex_lock(&mcast_mutex);
612 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 615 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
613 queue_work(ipoib_workqueue, &priv->mcast_task); 616 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
614 mutex_unlock(&mcast_mutex); 617 mutex_unlock(&mcast_mutex);
615 618
616 spin_lock_irq(&priv->lock); 619 spin_lock_irq(&priv->lock);
@@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
818 } 821 }
819} 822}
820 823
821void ipoib_mcast_restart_task(void *dev_ptr) 824void ipoib_mcast_restart_task(struct work_struct *work)
822{ 825{
823 struct net_device *dev = dev_ptr; 826 struct ipoib_dev_priv *priv =
824 struct ipoib_dev_priv *priv = netdev_priv(dev); 827 container_of(work, struct ipoib_dev_priv, restart_task);
828 struct net_device *dev = priv->dev;
825 struct dev_mc_list *mclist; 829 struct dev_mc_list *mclist;
826 struct ipoib_mcast *mcast, *tmcast; 830 struct ipoib_mcast *mcast, *tmcast;
827 LIST_HEAD(remove_list); 831 LIST_HEAD(remove_list);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 18a000034996..693b77002897 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,7 @@
48 48
49static void iser_cq_tasklet_fn(unsigned long data); 49static void iser_cq_tasklet_fn(unsigned long data);
50static void iser_cq_callback(struct ib_cq *cq, void *cq_context); 50static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
51static void iser_comp_error_worker(void *data); 51static void iser_comp_error_worker(struct work_struct *work);
52 52
53static void iser_cq_event_callback(struct ib_event *cause, void *context) 53static void iser_cq_event_callback(struct ib_event *cause, void *context)
54{ 54{
@@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn)
480 init_waitqueue_head(&ib_conn->wait); 480 init_waitqueue_head(&ib_conn->wait);
481 atomic_set(&ib_conn->post_recv_buf_count, 0); 481 atomic_set(&ib_conn->post_recv_buf_count, 0);
482 atomic_set(&ib_conn->post_send_buf_count, 0); 482 atomic_set(&ib_conn->post_send_buf_count, 0);
483 INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker, 483 INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
484 ib_conn);
485 INIT_LIST_HEAD(&ib_conn->conn_list); 484 INIT_LIST_HEAD(&ib_conn->conn_list);
486 spin_lock_init(&ib_conn->lock); 485 spin_lock_init(&ib_conn->lock);
487 486
@@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc)
754 return ret_val; 753 return ret_val;
755} 754}
756 755
757static void iser_comp_error_worker(void *data) 756static void iser_comp_error_worker(struct work_struct *work)
758{ 757{
759 struct iser_conn *ib_conn = data; 758 struct iser_conn *ib_conn =
759 container_of(work, struct iser_conn, comperror_work);
760 760
761 /* getting here when the state is UP means that the conn is being * 761 /* getting here when the state is UP means that the conn is being *
762 * terminated asynchronously from the iSCSI layer's perspective. */ 762 * terminated asynchronously from the iSCSI layer's perspective. */
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 64ab5fc7cca3..a6289595557b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -390,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
390 wait_for_completion(&target->done); 390 wait_for_completion(&target->done);
391} 391}
392 392
393static void srp_remove_work(void *target_ptr) 393static void srp_remove_work(struct work_struct *work)
394{ 394{
395 struct srp_target_port *target = target_ptr; 395 struct srp_target_port *target =
396 container_of(work, struct srp_target_port, work);
396 397
397 spin_lock_irq(target->scsi_host->host_lock); 398 spin_lock_irq(target->scsi_host->host_lock);
398 if (target->state != SRP_TARGET_DEAD) { 399 if (target->state != SRP_TARGET_DEAD) {
@@ -575,7 +576,7 @@ err:
575 spin_lock_irq(target->scsi_host->host_lock); 576 spin_lock_irq(target->scsi_host->host_lock);
576 if (target->state == SRP_TARGET_CONNECTING) { 577 if (target->state == SRP_TARGET_CONNECTING) {
577 target->state = SRP_TARGET_DEAD; 578 target->state = SRP_TARGET_DEAD;
578 INIT_WORK(&target->work, srp_remove_work, target); 579 INIT_WORK(&target->work, srp_remove_work);
579 schedule_work(&target->work); 580 schedule_work(&target->work);
580 } 581 }
581 spin_unlock_irq(target->scsi_host->host_lock); 582 spin_unlock_irq(target->scsi_host->host_lock);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index cbb93669d1ce..8451b29a3db5 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd)
567 * interrupt context. 567 * interrupt context.
568 */ 568 */
569 569
570static void atkbd_event_work(void *data) 570static void atkbd_event_work(struct work_struct *work)
571{ 571{
572 struct atkbd *atkbd = data; 572 struct atkbd *atkbd = container_of(work, struct atkbd, event_work);
573 573
574 mutex_lock(&atkbd->event_mutex); 574 mutex_lock(&atkbd->event_mutex);
575 575
@@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
943 943
944 atkbd->dev = dev; 944 atkbd->dev = dev;
945 ps2_init(&atkbd->ps2dev, serio); 945 ps2_init(&atkbd->ps2dev, serio);
946 INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd); 946 INIT_WORK(&atkbd->event_work, atkbd_event_work);
947 mutex_init(&atkbd->event_mutex); 947 mutex_init(&atkbd->event_mutex);
948 948
949 switch (serio->id.type) { 949 switch (serio->id.type) {
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 979b93e33da7..b7f049b45b6b 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -572,9 +572,9 @@ lkkbd_event (struct input_dev *dev, unsigned int type, unsigned int code,
572 * were in. 572 * were in.
573 */ 573 */
574static void 574static void
575lkkbd_reinit (void *data) 575lkkbd_reinit (struct work_struct *work)
576{ 576{
577 struct lkkbd *lk = data; 577 struct lkkbd *lk = container_of(work, struct lkkbd, tq);
578 int division; 578 int division;
579 unsigned char leds_on = 0; 579 unsigned char leds_on = 0;
580 unsigned char leds_off = 0; 580 unsigned char leds_off = 0;
@@ -651,7 +651,7 @@ lkkbd_connect (struct serio *serio, struct serio_driver *drv)
651 651
652 lk->serio = serio; 652 lk->serio = serio;
653 lk->dev = input_dev; 653 lk->dev = input_dev;
654 INIT_WORK (&lk->tq, lkkbd_reinit, lk); 654 INIT_WORK (&lk->tq, lkkbd_reinit);
655 lk->bell_volume = bell_volume; 655 lk->bell_volume = bell_volume;
656 lk->keyclick_volume = keyclick_volume; 656 lk->keyclick_volume = keyclick_volume;
657 lk->ctrlclick_volume = ctrlclick_volume; 657 lk->ctrlclick_volume = ctrlclick_volume;
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index cac4781103c3..6cd887c5eb0a 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -208,9 +208,9 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
208 * were in. 208 * were in.
209 */ 209 */
210 210
211static void sunkbd_reinit(void *data) 211static void sunkbd_reinit(struct work_struct *work)
212{ 212{
213 struct sunkbd *sunkbd = data; 213 struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
214 214
215 wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); 215 wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
216 216
@@ -248,7 +248,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv)
248 sunkbd->serio = serio; 248 sunkbd->serio = serio;
249 sunkbd->dev = input_dev; 249 sunkbd->dev = input_dev;
250 init_waitqueue_head(&sunkbd->wait); 250 init_waitqueue_head(&sunkbd->wait);
251 INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd); 251 INIT_WORK(&sunkbd->tq, sunkbd_reinit);
252 snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys); 252 snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);
253 253
254 serio_set_drvdata(serio, sunkbd); 254 serio_set_drvdata(serio, sunkbd);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 6f9b2c7cc9c2..52bb2226ce2f 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -888,9 +888,10 @@ static int psmouse_poll(struct psmouse *psmouse)
888 * psmouse_resync() attempts to re-validate current protocol. 888 * psmouse_resync() attempts to re-validate current protocol.
889 */ 889 */
890 890
891static void psmouse_resync(void *p) 891static void psmouse_resync(struct work_struct *work)
892{ 892{
893 struct psmouse *psmouse = p, *parent = NULL; 893 struct psmouse *parent = NULL, *psmouse =
894 container_of(work, struct psmouse, resync_work);
894 struct serio *serio = psmouse->ps2dev.serio; 895 struct serio *serio = psmouse->ps2dev.serio;
895 psmouse_ret_t rc = PSMOUSE_GOOD_DATA; 896 psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
896 int failed = 0, enabled = 0; 897 int failed = 0, enabled = 0;
@@ -1121,7 +1122,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
1121 goto out; 1122 goto out;
1122 1123
1123 ps2_init(&psmouse->ps2dev, serio); 1124 ps2_init(&psmouse->ps2dev, serio);
1124 INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse); 1125 INIT_WORK(&psmouse->resync_work, psmouse_resync);
1125 psmouse->dev = input_dev; 1126 psmouse->dev = input_dev;
1126 snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); 1127 snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
1127 1128
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index e5b1b60757bb..b3e84d3bb7f7 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command);
251 * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.) 251 * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
252 */ 252 */
253 253
254static void ps2_execute_scheduled_command(void *data) 254static void ps2_execute_scheduled_command(struct work_struct *work)
255{ 255{
256 struct ps2work *ps2work = data; 256 struct ps2work *ps2work = container_of(work, struct ps2work, work);
257 257
258 ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command); 258 ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
259 kfree(ps2work); 259 kfree(ps2work);
@@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman
278 ps2work->ps2dev = ps2dev; 278 ps2work->ps2dev = ps2dev;
279 ps2work->command = command; 279 ps2work->command = command;
280 memcpy(ps2work->param, param, send); 280 memcpy(ps2work->param, param, send);
281 INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work); 281 INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);
282 282
283 if (!schedule_work(&ps2work->work)) { 283 if (!schedule_work(&ps2work->work)) {
284 kfree(ps2work); 284 kfree(ps2work);
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index 6ae6eb322111..946c38cf6f8a 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -627,8 +627,10 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
627} 627}
628 628
629void 629void
630actcapi_dispatch(act2000_card *card) 630actcapi_dispatch(struct work_struct *work)
631{ 631{
632 struct act2000_card *card =
633 container_of(work, struct act2000_card, rcv_tq);
632 struct sk_buff *skb; 634 struct sk_buff *skb;
633 actcapi_msg *msg; 635 actcapi_msg *msg;
634 __u16 ccmd; 636 __u16 ccmd;
diff --git a/drivers/isdn/act2000/capi.h b/drivers/isdn/act2000/capi.h
index 49f453c53c64..e55f6a931f66 100644
--- a/drivers/isdn/act2000/capi.h
+++ b/drivers/isdn/act2000/capi.h
@@ -356,7 +356,7 @@ extern int actcapi_connect_req(act2000_card *, act2000_chan *, char *, char, int
356extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *); 356extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *);
357extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *); 357extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *);
358extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8); 358extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8);
359extern void actcapi_dispatch(act2000_card *); 359extern void actcapi_dispatch(struct work_struct *);
360#ifdef DEBUG_MSG 360#ifdef DEBUG_MSG
361extern void actcapi_debug_msg(struct sk_buff *skb, int); 361extern void actcapi_debug_msg(struct sk_buff *skb, int);
362#else 362#else
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index d89dcde4eade..90593e2ef872 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -192,8 +192,11 @@ act2000_set_msn(act2000_card *card, char *eazmsn)
192} 192}
193 193
194static void 194static void
195act2000_transmit(struct act2000_card *card) 195act2000_transmit(struct work_struct *work)
196{ 196{
197 struct act2000_card *card =
198 container_of(work, struct act2000_card, snd_tq);
199
197 switch (card->bus) { 200 switch (card->bus) {
198 case ACT2000_BUS_ISA: 201 case ACT2000_BUS_ISA:
199 act2000_isa_send(card); 202 act2000_isa_send(card);
@@ -207,8 +210,11 @@ act2000_transmit(struct act2000_card *card)
207} 210}
208 211
209static void 212static void
210act2000_receive(struct act2000_card *card) 213act2000_receive(struct work_struct *work)
211{ 214{
215 struct act2000_card *card =
216 container_of(work, struct act2000_card, poll_tq);
217
212 switch (card->bus) { 218 switch (card->bus) {
213 case ACT2000_BUS_ISA: 219 case ACT2000_BUS_ISA:
214 act2000_isa_receive(card); 220 act2000_isa_receive(card);
@@ -227,7 +233,7 @@ act2000_poll(unsigned long data)
227 act2000_card * card = (act2000_card *)data; 233 act2000_card * card = (act2000_card *)data;
228 unsigned long flags; 234 unsigned long flags;
229 235
230 act2000_receive(card); 236 act2000_receive(&card->poll_tq);
231 spin_lock_irqsave(&card->lock, flags); 237 spin_lock_irqsave(&card->lock, flags);
232 mod_timer(&card->ptimer, jiffies+3); 238 mod_timer(&card->ptimer, jiffies+3);
233 spin_unlock_irqrestore(&card->lock, flags); 239 spin_unlock_irqrestore(&card->lock, flags);
@@ -578,9 +584,9 @@ act2000_alloccard(int bus, int port, int irq, char *id)
578 skb_queue_head_init(&card->sndq); 584 skb_queue_head_init(&card->sndq);
579 skb_queue_head_init(&card->rcvq); 585 skb_queue_head_init(&card->rcvq);
580 skb_queue_head_init(&card->ackq); 586 skb_queue_head_init(&card->ackq);
581 INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card); 587 INIT_WORK(&card->snd_tq, act2000_transmit);
582 INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card); 588 INIT_WORK(&card->rcv_tq, actcapi_dispatch);
583 INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card); 589 INIT_WORK(&card->poll_tq, act2000_receive);
584 init_timer(&card->ptimer); 590 init_timer(&card->ptimer);
585 card->interface.owner = THIS_MODULE; 591 card->interface.owner = THIS_MODULE;
586 card->interface.channels = ACT2000_BCH; 592 card->interface.channels = ACT2000_BCH;
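
[Note] The act2000 hunks also drop the old (void *)(void *) casts that forced its handlers into the generic prototype; with the typed struct work_struct * signature a mismatched handler is now a compile error, and the timer path calls the handler directly with the embedded work item. Sketched with invented my_* names:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_card {
	struct work_struct poll_tq;
	struct timer_list ptimer;
};

static void my_receive(struct work_struct *work)
{
	struct my_card *card = container_of(work, struct my_card, poll_tq);

	pr_debug("polling card %p\n", card);
}

static void my_poll(unsigned long data)
{
	struct my_card *card = (struct my_card *)data;

	/* no function-pointer casts: a direct call passes the work item */
	my_receive(&card->poll_tq);
	mod_timer(&card->ptimer, jiffies + 3);
}

static void my_card_init(struct my_card *card)
{
	INIT_WORK(&card->poll_tq, my_receive);	/* typed, so mismatches fail to build */
	init_timer(&card->ptimer);
	card->ptimer.function = my_poll;
	card->ptimer.data = (unsigned long)card;
}
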
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 8c4fcb9027b3..783a25526315 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -208,9 +208,10 @@ static void notify_down(u32 contr)
208 } 208 }
209} 209}
210 210
211static void notify_handler(void *data) 211static void notify_handler(struct work_struct *work)
212{ 212{
213 struct capi_notifier *np = data; 213 struct capi_notifier *np =
214 container_of(work, struct capi_notifier, work);
214 215
215 switch (np->cmd) { 216 switch (np->cmd) {
216 case KCI_CONTRUP: 217 case KCI_CONTRUP:
@@ -235,7 +236,7 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci)
235 if (!np) 236 if (!np)
236 return -ENOMEM; 237 return -ENOMEM;
237 238
238 INIT_WORK(&np->work, notify_handler, np); 239 INIT_WORK(&np->work, notify_handler);
239 np->cmd = cmd; 240 np->cmd = cmd;
240 np->controller = controller; 241 np->controller = controller;
241 np->applid = applid; 242 np->applid = applid;
@@ -248,10 +249,11 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci)
248 249
249/* -------- Receiver ------------------------------------------ */ 250/* -------- Receiver ------------------------------------------ */
250 251
251static void recv_handler(void *_ap) 252static void recv_handler(struct work_struct *work)
252{ 253{
253 struct sk_buff *skb; 254 struct sk_buff *skb;
254 struct capi20_appl *ap = (struct capi20_appl *) _ap; 255 struct capi20_appl *ap =
256 container_of(work, struct capi20_appl, recv_work);
255 257
256 if ((!ap) || (ap->release_in_progress)) 258 if ((!ap) || (ap->release_in_progress))
257 return; 259 return;
@@ -527,7 +529,7 @@ u16 capi20_register(struct capi20_appl *ap)
527 ap->callback = NULL; 529 ap->callback = NULL;
528 init_MUTEX(&ap->recv_sem); 530 init_MUTEX(&ap->recv_sem);
529 skb_queue_head_init(&ap->recv_queue); 531 skb_queue_head_init(&ap->recv_queue);
530 INIT_WORK(&ap->recv_work, recv_handler, (void *)ap); 532 INIT_WORK(&ap->recv_work, recv_handler);
531 ap->release_in_progress = 0; 533 ap->release_in_progress = 0;
532 534
533 write_unlock_irqrestore(&application_lock, flags); 535 write_unlock_irqrestore(&application_lock, flags);
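
The kcapi.c hunks above, and most of the driver hunks that follow, apply the same mechanical conversion: with the reworked workqueue API, INIT_WORK() no longer takes a data pointer, the handler's signature changes from void (*)(void *) to void (*)(struct work_struct *), and the handler recovers its context with container_of() on the embedded work item. A minimal sketch of the before/after shape, using hypothetical names (foo_dev, foo_handler) rather than any structure from the patch:

#include <linux/workqueue.h>

/* Hypothetical driver context; the work item is embedded in it
 * instead of carrying a separate void *data pointer. */
struct foo_dev {
	int pending;
	struct work_struct work;
};

/* New-style handler: receives the work item, recovers its container. */
static void foo_handler(struct work_struct *work)
{
	struct foo_dev *dev = container_of(work, struct foo_dev, work);

	dev->pending = 0;
}

static void foo_init(struct foo_dev *dev)
{
	/* Old API: INIT_WORK(&dev->work, foo_handler, dev);
	 * New API: only the work item and the handler are passed. */
	INIT_WORK(&dev->work, foo_handler);
	schedule_work(&dev->work);
}
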
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 7bbfd85ab793..fd5d7364a487 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -194,41 +194,11 @@ static int avmcs_config(struct pcmcia_device *link)
194 194
195 dev = link->priv; 195 dev = link->priv;
196 196
197 /*
198 This reads the card's CONFIG tuple to find its configuration
199 registers.
200 */
201 do { 197 do {
202 tuple.DesiredTuple = CISTPL_CONFIG;
203 i = pcmcia_get_first_tuple(link, &tuple);
204 if (i != CS_SUCCESS) break;
205 tuple.TupleData = buf;
206 tuple.TupleDataMax = 64;
207 tuple.TupleOffset = 0;
208 i = pcmcia_get_tuple_data(link, &tuple);
209 if (i != CS_SUCCESS) break;
210 i = pcmcia_parse_tuple(link, &tuple, &parse);
211 if (i != CS_SUCCESS) break;
212 link->conf.ConfigBase = parse.config.base;
213 } while (0);
214 if (i != CS_SUCCESS) {
215 cs_error(link, ParseTuple, i);
216 return -ENODEV;
217 }
218
219 do {
220
221 tuple.Attributes = 0;
222 tuple.TupleData = buf;
223 tuple.TupleDataMax = 254;
224 tuple.TupleOffset = 0;
225 tuple.DesiredTuple = CISTPL_VERS_1;
226
227 devname[0] = 0; 198 devname[0] = 0;
228 if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) { 199 if (link->prod_id[1])
229 strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1], 200 strlcpy(devname, link->prod_id[1], sizeof(devname));
230 sizeof(devname)); 201
231 }
232 /* 202 /*
233 * find IO port 203 * find IO port
234 */ 204 */
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index bec59010bc66..3b19caeba258 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -232,9 +232,10 @@ Amd7930_new_ph(struct IsdnCardState *cs)
232 232
233 233
234static void 234static void
235Amd7930_bh(struct IsdnCardState *cs) 235Amd7930_bh(struct work_struct *work)
236{ 236{
237 237 struct IsdnCardState *cs =
238 container_of(work, struct IsdnCardState, tqueue);
238 struct PStack *stptr; 239 struct PStack *stptr;
239 240
240 if (!cs) 241 if (!cs)
@@ -789,7 +790,7 @@ Amd7930_init(struct IsdnCardState *cs)
789void __devinit 790void __devinit
790setup_Amd7930(struct IsdnCardState *cs) 791setup_Amd7930(struct IsdnCardState *cs)
791{ 792{
792 INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs); 793 INIT_WORK(&cs->tqueue, Amd7930_bh);
793 cs->dbusytimer.function = (void *) dbusy_timer_handler; 794 cs->dbusytimer.function = (void *) dbusy_timer_handler;
794 cs->dbusytimer.data = (long) cs; 795 cs->dbusytimer.data = (long) cs;
795 init_timer(&cs->dbusytimer); 796 init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index ac28e3278ad9..876fec6c6be8 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -216,41 +216,11 @@ static int avma1cs_config(struct pcmcia_device *link)
216 216
217 DEBUG(0, "avma1cs_config(0x%p)\n", link); 217 DEBUG(0, "avma1cs_config(0x%p)\n", link);
218 218
219 /*
220 This reads the card's CONFIG tuple to find its configuration
221 registers.
222 */
223 do { 219 do {
224 tuple.DesiredTuple = CISTPL_CONFIG;
225 i = pcmcia_get_first_tuple(link, &tuple);
226 if (i != CS_SUCCESS) break;
227 tuple.TupleData = buf;
228 tuple.TupleDataMax = 64;
229 tuple.TupleOffset = 0;
230 i = pcmcia_get_tuple_data(link, &tuple);
231 if (i != CS_SUCCESS) break;
232 i = pcmcia_parse_tuple(link, &tuple, &parse);
233 if (i != CS_SUCCESS) break;
234 link->conf.ConfigBase = parse.config.base;
235 } while (0);
236 if (i != CS_SUCCESS) {
237 cs_error(link, ParseTuple, i);
238 return -ENODEV;
239 }
240
241 do {
242
243 tuple.Attributes = 0;
244 tuple.TupleData = buf;
245 tuple.TupleDataMax = 254;
246 tuple.TupleOffset = 0;
247 tuple.DesiredTuple = CISTPL_VERS_1;
248
249 devname[0] = 0; 220 devname[0] = 0;
250 if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) { 221 if (link->prod_id[1])
251 strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1], 222 strlcpy(devname, link->prod_id[1], sizeof(devname));
252 sizeof(devname)); 223
253 }
254 /* 224 /*
255 * find IO port 225 * find IO port
256 */ 226 */
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 785b08554fca..cede72cdbb31 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1137,7 +1137,6 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
1137 cs->tx_skb = NULL; 1137 cs->tx_skb = NULL;
1138 cs->tx_cnt = 0; 1138 cs->tx_cnt = 0;
1139 cs->event = 0; 1139 cs->event = 0;
1140 cs->tqueue.data = cs;
1141 1140
1142 skb_queue_head_init(&cs->rq); 1141 skb_queue_head_init(&cs->rq);
1143 skb_queue_head_init(&cs->sq); 1142 skb_queue_head_init(&cs->sq);
@@ -1554,7 +1553,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg);
1554static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); 1553static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
1555static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); 1554static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
1556static void hisax_bc_close(struct BCState *bcs); 1555static void hisax_bc_close(struct BCState *bcs);
1557static void hisax_bh(struct IsdnCardState *cs); 1556static void hisax_bh(struct work_struct *work);
1558static void EChannel_proc_rcv(struct hisax_d_if *d_if); 1557static void EChannel_proc_rcv(struct hisax_d_if *d_if);
1559 1558
1560int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], 1559int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
@@ -1586,7 +1585,7 @@ int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
1586 hisax_d_if->cs = cs; 1585 hisax_d_if->cs = cs;
1587 cs->hw.hisax_d_if = hisax_d_if; 1586 cs->hw.hisax_d_if = hisax_d_if;
1588 cs->cardmsg = hisax_cardmsg; 1587 cs->cardmsg = hisax_cardmsg;
1589 INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs); 1588 INIT_WORK(&cs->tqueue, hisax_bh);
1590 cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; 1589 cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
1591 for (i = 0; i < 2; i++) { 1590 for (i = 0; i < 2; i++) {
1592 cs->bcs[i].BC_SetStack = hisax_bc_setstack; 1591 cs->bcs[i].BC_SetStack = hisax_bc_setstack;
@@ -1618,8 +1617,10 @@ static void hisax_sched_event(struct IsdnCardState *cs, int event)
1618 schedule_work(&cs->tqueue); 1617 schedule_work(&cs->tqueue);
1619} 1618}
1620 1619
1621static void hisax_bh(struct IsdnCardState *cs) 1620static void hisax_bh(struct work_struct *work)
1622{ 1621{
1622 struct IsdnCardState *cs =
1623 container_of(work, struct IsdnCardState, tqueue);
1623 struct PStack *st; 1624 struct PStack *st;
1624 int pr; 1625 int pr;
1625 1626
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index e18e75be8ed3..4e180d210faa 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -242,23 +242,6 @@ static int elsa_cs_config(struct pcmcia_device *link)
242 DEBUG(0, "elsa_config(0x%p)\n", link); 242 DEBUG(0, "elsa_config(0x%p)\n", link);
243 dev = link->priv; 243 dev = link->priv;
244 244
245 /*
246 This reads the card's CONFIG tuple to find its configuration
247 registers.
248 */
249 tuple.DesiredTuple = CISTPL_CONFIG;
250 tuple.TupleData = (cisdata_t *)buf;
251 tuple.TupleDataMax = 255;
252 tuple.TupleOffset = 0;
253 tuple.Attributes = 0;
254 i = first_tuple(link, &tuple, &parse);
255 if (i != CS_SUCCESS) {
256 last_fn = ParseTuple;
257 goto cs_failed;
258 }
259 link->conf.ConfigBase = parse.config.base;
260 link->conf.Present = parse.config.rmask[0];
261
262 tuple.TupleData = (cisdata_t *)buf; 245 tuple.TupleData = (cisdata_t *)buf;
263 tuple.TupleOffset = 0; tuple.TupleDataMax = 255; 246 tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
264 tuple.Attributes = 0; 247 tuple.Attributes = 0;
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index d852c9d998b2..de9b1a4d6bac 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1083,8 +1083,9 @@ tx_b_frame(struct hfc4s8s_btype *bch)
1083/* bottom half handler for interrupt */ 1083/* bottom half handler for interrupt */
1084/*************************************/ 1084/*************************************/
1085static void 1085static void
1086hfc4s8s_bh(hfc4s8s_hw * hw) 1086hfc4s8s_bh(struct work_struct *work)
1087{ 1087{
1088 hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
1088 u_char b; 1089 u_char b;
1089 struct hfc4s8s_l1 *l1p; 1090 struct hfc4s8s_l1 *l1p;
1090 volatile u_char *fifo_stat; 1091 volatile u_char *fifo_stat;
@@ -1550,7 +1551,7 @@ setup_instance(hfc4s8s_hw * hw)
1550 goto out; 1551 goto out;
1551 } 1552 }
1552 1553
1553 INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw); 1554 INIT_WORK(&hw->tqueue, hfc4s8s_bh);
1554 1555
1555 if (request_irq 1556 if (request_irq
1556 (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) { 1557 (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 6360e8214720..8d9864453a23 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -549,10 +549,11 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
549} 549}
550 550
551static void 551static void
552hfcd_bh(struct IsdnCardState *cs) 552hfcd_bh(struct work_struct *work)
553{ 553{
554 if (!cs) 554 struct IsdnCardState *cs =
555 return; 555 container_of(work, struct IsdnCardState, tqueue);
556
556 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { 557 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
557 switch (cs->dc.hfcd.ph_state) { 558 switch (cs->dc.hfcd.ph_state) {
558 case (0): 559 case (0):
@@ -1072,5 +1073,5 @@ set_cs_func(struct IsdnCardState *cs)
1072 cs->dbusytimer.function = (void *) hfc_dbusy_timer; 1073 cs->dbusytimer.function = (void *) hfc_dbusy_timer;
1073 cs->dbusytimer.data = (long) cs; 1074 cs->dbusytimer.data = (long) cs;
1074 init_timer(&cs->dbusytimer); 1075 init_timer(&cs->dbusytimer);
1075 INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs); 1076 INIT_WORK(&cs->tqueue, hfcd_bh);
1076} 1077}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 93f60b563515..5db0a85b827f 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1506,8 +1506,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
1506/* handle L1 state changes */ 1506/* handle L1 state changes */
1507/***************************/ 1507/***************************/
1508static void 1508static void
1509hfcpci_bh(struct IsdnCardState *cs) 1509hfcpci_bh(struct work_struct *work)
1510{ 1510{
1511 struct IsdnCardState *cs =
1512 container_of(work, struct IsdnCardState, tqueue);
1511 u_long flags; 1513 u_long flags;
1512// struct PStack *stptr; 1514// struct PStack *stptr;
1513 1515
@@ -1722,7 +1724,7 @@ setup_hfcpci(struct IsdnCard *card)
1722 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); 1724 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1723 /* At this point the needed PCI config is done */ 1725 /* At this point the needed PCI config is done */
1724 /* fifos are still not enabled */ 1726 /* fifos are still not enabled */
1725 INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs); 1727 INIT_WORK(&cs->tqueue, hfcpci_bh);
1726 cs->setstack_d = setstack_hfcpci; 1728 cs->setstack_d = setstack_hfcpci;
1727 cs->BC_Send_Data = &hfcpci_send_data; 1729 cs->BC_Send_Data = &hfcpci_send_data;
1728 cs->readisac = NULL; 1730 cs->readisac = NULL;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 954d1536db1f..4fd09d21a27f 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1251,8 +1251,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
1251/* handle L1 state changes */ 1251/* handle L1 state changes */
1252/***************************/ 1252/***************************/
1253static void 1253static void
1254hfcsx_bh(struct IsdnCardState *cs) 1254hfcsx_bh(struct work_struct *work)
1255{ 1255{
1256 struct IsdnCardState *cs =
1257 container_of(work, struct IsdnCardState, tqueue);
1256 u_long flags; 1258 u_long flags;
1257 1259
1258 if (!cs) 1260 if (!cs)
@@ -1499,7 +1501,7 @@ setup_hfcsx(struct IsdnCard *card)
1499 cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; 1501 cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
1500 cs->dbusytimer.data = (long) cs; 1502 cs->dbusytimer.data = (long) cs;
1501 init_timer(&cs->dbusytimer); 1503 init_timer(&cs->dbusytimer);
1502 INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs); 1504 INIT_WORK(&cs->tqueue, hfcsx_bh);
1503 cs->readisac = NULL; 1505 cs->readisac = NULL;
1504 cs->writeisac = NULL; 1506 cs->writeisac = NULL;
1505 cs->readisacfifo = NULL; 1507 cs->readisacfifo = NULL;
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index da706925d54d..682cac32f259 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -77,8 +77,10 @@ icc_new_ph(struct IsdnCardState *cs)
77} 77}
78 78
79static void 79static void
80icc_bh(struct IsdnCardState *cs) 80icc_bh(struct work_struct *work)
81{ 81{
82 struct IsdnCardState *cs =
83 container_of(work, struct IsdnCardState, tqueue);
82 struct PStack *stptr; 84 struct PStack *stptr;
83 85
84 if (!cs) 86 if (!cs)
@@ -674,7 +676,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs)
674void __devinit 676void __devinit
675setup_icc(struct IsdnCardState *cs) 677setup_icc(struct IsdnCardState *cs)
676{ 678{
677 INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs); 679 INIT_WORK(&cs->tqueue, icc_bh);
678 cs->dbusytimer.function = (void *) dbusy_timer_handler; 680 cs->dbusytimer.function = (void *) dbusy_timer_handler;
679 cs->dbusytimer.data = (long) cs; 681 cs->dbusytimer.data = (long) cs;
680 init_timer(&cs->dbusytimer); 682 init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 282f349408bc..4e9f23803dae 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -81,8 +81,10 @@ isac_new_ph(struct IsdnCardState *cs)
81} 81}
82 82
83static void 83static void
84isac_bh(struct IsdnCardState *cs) 84isac_bh(struct work_struct *work)
85{ 85{
86 struct IsdnCardState *cs =
87 container_of(work, struct IsdnCardState, tqueue);
86 struct PStack *stptr; 88 struct PStack *stptr;
87 89
88 if (!cs) 90 if (!cs)
@@ -674,7 +676,7 @@ clear_pending_isac_ints(struct IsdnCardState *cs)
674void __devinit 676void __devinit
675setup_isac(struct IsdnCardState *cs) 677setup_isac(struct IsdnCardState *cs)
676{ 678{
677 INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs); 679 INIT_WORK(&cs->tqueue, isac_bh);
678 cs->dbusytimer.function = (void *) dbusy_timer_handler; 680 cs->dbusytimer.function = (void *) dbusy_timer_handler;
679 cs->dbusytimer.data = (long) cs; 681 cs->dbusytimer.data = (long) cs;
680 init_timer(&cs->dbusytimer); 682 init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 674af673ff96..6f1a6583b17d 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -437,8 +437,10 @@ extern void BChannel_bh(struct BCState *);
437#define B_LL_OK 10 437#define B_LL_OK 10
438 438
439static void 439static void
440isar_bh(struct BCState *bcs) 440isar_bh(struct work_struct *work)
441{ 441{
442 struct BCState *bcs = container_of(work, struct BCState, tqueue);
443
442 BChannel_bh(bcs); 444 BChannel_bh(bcs);
443 if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event)) 445 if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
444 ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR); 446 ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
@@ -1580,7 +1582,7 @@ isar_setup(struct IsdnCardState *cs)
1580 cs->bcs[i].mode = 0; 1582 cs->bcs[i].mode = 0;
1581 cs->bcs[i].hw.isar.dpath = i + 1; 1583 cs->bcs[i].hw.isar.dpath = i + 1;
1582 modeisar(&cs->bcs[i], 0, 0); 1584 modeisar(&cs->bcs[i], 0, 0);
1583 INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]); 1585 INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
1584 } 1586 }
1585} 1587}
1586 1588
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c
index bab356886483..a14204ec88ee 100644
--- a/drivers/isdn/hisax/isdnl1.c
+++ b/drivers/isdn/hisax/isdnl1.c
@@ -315,8 +315,10 @@ BChannel_proc_ack(struct BCState *bcs)
315} 315}
316 316
317void 317void
318BChannel_bh(struct BCState *bcs) 318BChannel_bh(struct work_struct *work)
319{ 319{
320 struct BCState *bcs = container_of(work, struct BCState, tqueue);
321
320 if (!bcs) 322 if (!bcs)
321 return; 323 return;
322 if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) 324 if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
@@ -362,7 +364,7 @@ init_bcstate(struct IsdnCardState *cs, int bc)
362 364
363 bcs->cs = cs; 365 bcs->cs = cs;
364 bcs->channel = bc; 366 bcs->channel = bc;
365 INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs); 367 INIT_WORK(&bcs->tqueue, BChannel_bh);
366 spin_lock_init(&bcs->aclock); 368 spin_lock_init(&bcs->aclock);
367 bcs->BC_SetStack = NULL; 369 bcs->BC_SetStack = NULL;
368 bcs->BC_Close = NULL; 370 bcs->BC_Close = NULL;
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index f9c14a2970bc..46ed65334c51 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -233,20 +233,10 @@ static int sedlbauer_config(struct pcmcia_device *link)
233 233
234 DEBUG(0, "sedlbauer_config(0x%p)\n", link); 234 DEBUG(0, "sedlbauer_config(0x%p)\n", link);
235 235
236 /*
237 This reads the card's CONFIG tuple to find its configuration
238 registers.
239 */
240 tuple.DesiredTuple = CISTPL_CONFIG;
241 tuple.Attributes = 0; 236 tuple.Attributes = 0;
242 tuple.TupleData = buf; 237 tuple.TupleData = buf;
243 tuple.TupleDataMax = sizeof(buf); 238 tuple.TupleDataMax = sizeof(buf);
244 tuple.TupleOffset = 0; 239 tuple.TupleOffset = 0;
245 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
246 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
247 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
248 link->conf.ConfigBase = parse.config.base;
249 link->conf.Present = parse.config.rmask[0];
250 240
251 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf)); 241 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
252 242
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index afcc2aeadb34..6b754f183796 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -232,23 +232,6 @@ static int teles_cs_config(struct pcmcia_device *link)
232 DEBUG(0, "teles_config(0x%p)\n", link); 232 DEBUG(0, "teles_config(0x%p)\n", link);
233 dev = link->priv; 233 dev = link->priv;
234 234
235 /*
236 This reads the card's CONFIG tuple to find its configuration
237 registers.
238 */
239 tuple.DesiredTuple = CISTPL_CONFIG;
240 tuple.TupleData = (cisdata_t *)buf;
241 tuple.TupleDataMax = 255;
242 tuple.TupleOffset = 0;
243 tuple.Attributes = 0;
244 i = first_tuple(link, &tuple, &parse);
245 if (i != CS_SUCCESS) {
246 last_fn = ParseTuple;
247 goto cs_failed;
248 }
249 link->conf.ConfigBase = parse.config.base;
250 link->conf.Present = parse.config.rmask[0];
251
252 tuple.TupleData = (cisdata_t *)buf; 235 tuple.TupleData = (cisdata_t *)buf;
253 tuple.TupleOffset = 0; tuple.TupleDataMax = 255; 236 tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
254 tuple.Attributes = 0; 237 tuple.Attributes = 0;
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 1655341797a9..3aeceaf9769e 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -101,8 +101,10 @@ W6692_new_ph(struct IsdnCardState *cs)
101} 101}
102 102
103static void 103static void
104W6692_bh(struct IsdnCardState *cs) 104W6692_bh(struct work_struct *work)
105{ 105{
106 struct IsdnCardState *cs =
107 container_of(work, struct IsdnCardState, tqueue);
106 struct PStack *stptr; 108 struct PStack *stptr;
107 109
108 if (!cs) 110 if (!cs)
@@ -1070,7 +1072,7 @@ setup_w6692(struct IsdnCard *card)
1070 id_list[cs->subtyp].card_name, cs->irq, 1072 id_list[cs->subtyp].card_name, cs->irq,
1071 cs->hw.w6692.iobase); 1073 cs->hw.w6692.iobase);
1072 1074
1073 INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs); 1075 INIT_WORK(&cs->tqueue, W6692_bh);
1074 cs->readW6692 = &ReadW6692; 1076 cs->readW6692 = &ReadW6692;
1075 cs->writeW6692 = &WriteW6692; 1077 cs->writeW6692 = &WriteW6692;
1076 cs->readisacfifo = &ReadISACfifo; 1078 cs->readisacfifo = &ReadISACfifo;
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f8d6ae66b41..2e4daebfb7e0 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -984,9 +984,9 @@ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb)
984/* 984/*
985 * called from tq_immediate 985 * called from tq_immediate
986 */ 986 */
987static void isdn_net_softint(void *private) 987static void isdn_net_softint(struct work_struct *work)
988{ 988{
989 isdn_net_local *lp = private; 989 isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
990 struct sk_buff *skb; 990 struct sk_buff *skb;
991 991
992 spin_lock_bh(&lp->xmit_lock); 992 spin_lock_bh(&lp->xmit_lock);
@@ -2596,7 +2596,7 @@ isdn_net_new(char *name, struct net_device *master)
2596 netdev->local->netdev = netdev; 2596 netdev->local->netdev = netdev;
2597 netdev->local->next = netdev->local; 2597 netdev->local->next = netdev->local;
2598 2598
2599 INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local); 2599 INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
2600 spin_lock_init(&netdev->local->xmit_lock); 2600 spin_lock_init(&netdev->local->xmit_lock);
2601 2601
2602 netdev->local->isdn_device = -1; 2602 netdev->local->isdn_device = -1;
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 6ead5e1508b7..1966f3410a13 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -68,8 +68,6 @@ static void pcbit_set_msn(struct pcbit_dev *dev, char *list);
68static int pcbit_check_msn(struct pcbit_dev *dev, char *msn); 68static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
69 69
70 70
71extern void pcbit_deliver(void * data);
72
73int pcbit_init_dev(int board, int mem_base, int irq) 71int pcbit_init_dev(int board, int mem_base, int irq)
74{ 72{
75 struct pcbit_dev *dev; 73 struct pcbit_dev *dev;
@@ -129,7 +127,7 @@ int pcbit_init_dev(int board, int mem_base, int irq)
129 memset(dev->b2, 0, sizeof(struct pcbit_chan)); 127 memset(dev->b2, 0, sizeof(struct pcbit_chan));
130 dev->b2->id = 1; 128 dev->b2->id = 1;
131 129
132 INIT_WORK(&dev->qdelivery, pcbit_deliver, dev); 130 INIT_WORK(&dev->qdelivery, pcbit_deliver);
133 131
134 /* 132 /*
135 * interrupts 133 * interrupts
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index 937fd2120381..0c9f6df873fc 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -67,7 +67,6 @@ extern void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg,
67 * Prototypes 67 * Prototypes
68 */ 68 */
69 69
70void pcbit_deliver(void *data);
71static void pcbit_transmit(struct pcbit_dev *dev); 70static void pcbit_transmit(struct pcbit_dev *dev);
72 71
73static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); 72static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
@@ -299,11 +298,12 @@ pcbit_transmit(struct pcbit_dev *dev)
299 */ 298 */
300 299
301void 300void
302pcbit_deliver(void *data) 301pcbit_deliver(struct work_struct *work)
303{ 302{
304 struct frame_buf *frame; 303 struct frame_buf *frame;
305 unsigned long flags, msg; 304 unsigned long flags, msg;
306 struct pcbit_dev *dev = (struct pcbit_dev *) data; 305 struct pcbit_dev *dev =
306 container_of(work, struct pcbit_dev, qdelivery);
307 307
308 spin_lock_irqsave(&dev->lock, flags); 308 spin_lock_irqsave(&dev->lock, flags);
309 309
diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/isdn/pcbit/pcbit.h
index 388bacefd23a..19c18e88ff16 100644
--- a/drivers/isdn/pcbit/pcbit.h
+++ b/drivers/isdn/pcbit/pcbit.h
@@ -166,4 +166,6 @@ struct pcbit_ioctl {
166#define L2_RUNNING 5 166#define L2_RUNNING 5
167#define L2_ERROR 6 167#define L2_ERROR 6
168 168
169extern void pcbit_deliver(struct work_struct *work);
170
169#endif 171#endif
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index f1b6f563673a..5ed41fe84e57 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -48,7 +48,8 @@ struct rackmeter_dma {
48} ____cacheline_aligned; 48} ____cacheline_aligned;
49 49
50struct rackmeter_cpu { 50struct rackmeter_cpu {
51 struct work_struct sniffer; 51 struct delayed_work sniffer;
52 struct rackmeter *rm;
52 cputime64_t prev_wall; 53 cputime64_t prev_wall;
53 cputime64_t prev_idle; 54 cputime64_t prev_idle;
54 int zero; 55 int zero;
@@ -208,11 +209,12 @@ static void rackmeter_setup_dbdma(struct rackmeter *rm)
208 rackmeter_do_pause(rm, 0); 209 rackmeter_do_pause(rm, 0);
209} 210}
210 211
211static void rackmeter_do_timer(void *data) 212static void rackmeter_do_timer(struct work_struct *work)
212{ 213{
213 struct rackmeter *rm = data; 214 struct rackmeter_cpu *rcpu =
215 container_of(work, struct rackmeter_cpu, sniffer.work);
216 struct rackmeter *rm = rcpu->rm;
214 unsigned int cpu = smp_processor_id(); 217 unsigned int cpu = smp_processor_id();
215 struct rackmeter_cpu *rcpu = &rm->cpu[cpu];
216 cputime64_t cur_jiffies, total_idle_ticks; 218 cputime64_t cur_jiffies, total_idle_ticks;
217 unsigned int total_ticks, idle_ticks; 219 unsigned int total_ticks, idle_ticks;
218 int i, offset, load, cumm, pause; 220 int i, offset, load, cumm, pause;
@@ -263,8 +265,10 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
263 * on those machines yet 265 * on those machines yet
264 */ 266 */
265 267
266 INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm); 268 rm->cpu[0].rm = rm;
267 INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm); 269 INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
270 rm->cpu[1].rm = rm;
271 INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
268 272
269 for_each_online_cpu(cpu) { 273 for_each_online_cpu(cpu) {
270 struct rackmeter_cpu *rcpu; 274 struct rackmeter_cpu *rcpu;
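
The rack-meter hunks show the variant used when the item is queued with a delay: the plain work_struct becomes a struct delayed_work, INIT_WORK() becomes INIT_DELAYED_WORK(), and because container_of() can only recover the structure that embeds the work item, an explicit back-pointer replaces the old data argument. A sketch of that shape with hypothetical names (bar_cpu, bar_do_timer), assuming a one-second re-arm interval:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct bar;		/* hypothetical owner type */

struct bar_cpu {
	struct delayed_work sniffer;
	struct bar *rm;	/* owner, formerly passed via INIT_WORK()'s data arg */
};

static void bar_do_timer(struct work_struct *work)
{
	/* A delayed_work embeds a work_struct named .work, so the
	 * container is recovered through the sniffer.work member. */
	struct bar_cpu *rcpu =
		container_of(work, struct bar_cpu, sniffer.work);

	/* ... sample rcpu->rm here ... */
	schedule_delayed_work(&rcpu->sniffer, HZ);	/* re-arm in 1s */
}

static void bar_init_cpu(struct bar_cpu *rcpu, struct bar *rm)
{
	rcpu->rm = rm;
	INIT_DELAYED_WORK(&rcpu->sniffer, bar_do_timer);
	schedule_delayed_work(&rcpu->sniffer, HZ);
}
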
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 4f724cdd2efa..6dde27ab79a8 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -601,7 +601,7 @@ core_initcall(smu_late_init);
601 * sysfs visibility 601 * sysfs visibility
602 */ 602 */
603 603
604static void smu_expose_childs(void *unused) 604static void smu_expose_childs(struct work_struct *unused)
605{ 605{
606 struct device_node *np; 606 struct device_node *np;
607 607
@@ -611,7 +611,7 @@ static void smu_expose_childs(void *unused)
611 &smu->of_dev->dev); 611 &smu->of_dev->dev);
612} 612}
613 613
614static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL); 614static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
615 615
616static int smu_platform_probe(struct of_device* dev, 616static int smu_platform_probe(struct of_device* dev,
617 const struct of_device_id *match) 617 const struct of_device_id *match)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08a40f4e4f60..ed2d4ef27fd8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -458,11 +458,11 @@ static void dec_pending(struct crypt_io *io, int error)
458 * interrupt context. 458 * interrupt context.
459 */ 459 */
460static struct workqueue_struct *_kcryptd_workqueue; 460static struct workqueue_struct *_kcryptd_workqueue;
461static void kcryptd_do_work(void *data); 461static void kcryptd_do_work(struct work_struct *work);
462 462
463static void kcryptd_queue_io(struct crypt_io *io) 463static void kcryptd_queue_io(struct crypt_io *io)
464{ 464{
465 INIT_WORK(&io->work, kcryptd_do_work, io); 465 INIT_WORK(&io->work, kcryptd_do_work);
466 queue_work(_kcryptd_workqueue, &io->work); 466 queue_work(_kcryptd_workqueue, &io->work);
467} 467}
468 468
@@ -618,9 +618,9 @@ static void process_read_endio(struct crypt_io *io)
618 dec_pending(io, crypt_convert(cc, &ctx)); 618 dec_pending(io, crypt_convert(cc, &ctx));
619} 619}
620 620
621static void kcryptd_do_work(void *data) 621static void kcryptd_do_work(struct work_struct *work)
622{ 622{
623 struct crypt_io *io = data; 623 struct crypt_io *io = container_of(work, struct crypt_io, work);
624 624
625 if (io->post_process) 625 if (io->post_process)
626 process_read_endio(io); 626 process_read_endio(io);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d754e0bc6e90..e77ee6fd1044 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -104,8 +104,8 @@ typedef int (*action_fn) (struct pgpath *pgpath);
104static kmem_cache_t *_mpio_cache; 104static kmem_cache_t *_mpio_cache;
105 105
106struct workqueue_struct *kmultipathd; 106struct workqueue_struct *kmultipathd;
107static void process_queued_ios(void *data); 107static void process_queued_ios(struct work_struct *work);
108static void trigger_event(void *data); 108static void trigger_event(struct work_struct *work);
109 109
110 110
111/*----------------------------------------------- 111/*-----------------------------------------------
@@ -173,8 +173,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
173 INIT_LIST_HEAD(&m->priority_groups); 173 INIT_LIST_HEAD(&m->priority_groups);
174 spin_lock_init(&m->lock); 174 spin_lock_init(&m->lock);
175 m->queue_io = 1; 175 m->queue_io = 1;
176 INIT_WORK(&m->process_queued_ios, process_queued_ios, m); 176 INIT_WORK(&m->process_queued_ios, process_queued_ios);
177 INIT_WORK(&m->trigger_event, trigger_event, m); 177 INIT_WORK(&m->trigger_event, trigger_event);
178 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 178 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
179 if (!m->mpio_pool) { 179 if (!m->mpio_pool) {
180 kfree(m); 180 kfree(m);
@@ -379,9 +379,10 @@ static void dispatch_queued_ios(struct multipath *m)
379 } 379 }
380} 380}
381 381
382static void process_queued_ios(void *data) 382static void process_queued_ios(struct work_struct *work)
383{ 383{
384 struct multipath *m = (struct multipath *) data; 384 struct multipath *m =
385 container_of(work, struct multipath, process_queued_ios);
385 struct hw_handler *hwh = &m->hw_handler; 386 struct hw_handler *hwh = &m->hw_handler;
386 struct pgpath *pgpath = NULL; 387 struct pgpath *pgpath = NULL;
387 unsigned init_required = 0, must_queue = 1; 388 unsigned init_required = 0, must_queue = 1;
@@ -421,9 +422,10 @@ out:
421 * An event is triggered whenever a path is taken out of use. 422 * An event is triggered whenever a path is taken out of use.
422 * Includes path failure and PG bypass. 423 * Includes path failure and PG bypass.
423 */ 424 */
424static void trigger_event(void *data) 425static void trigger_event(struct work_struct *work)
425{ 426{
426 struct multipath *m = (struct multipath *) data; 427 struct multipath *m =
428 container_of(work, struct multipath, trigger_event);
427 429
428 dm_table_event(m->ti->table); 430 dm_table_event(m->ti->table);
429} 431}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 48a653b3f518..fc8cbb168e3e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -883,7 +883,7 @@ static void do_mirror(struct mirror_set *ms)
883 do_writes(ms, &writes); 883 do_writes(ms, &writes);
884} 884}
885 885
886static void do_work(void *ignored) 886static void do_work(struct work_struct *ignored)
887{ 887{
888 struct mirror_set *ms; 888 struct mirror_set *ms;
889 889
@@ -1269,7 +1269,7 @@ static int __init dm_mirror_init(void)
1269 dm_dirty_log_exit(); 1269 dm_dirty_log_exit();
1270 return r; 1270 return r;
1271 } 1271 }
1272 INIT_WORK(&_kmirrord_work, do_work, NULL); 1272 INIT_WORK(&_kmirrord_work, do_work);
1273 1273
1274 r = dm_register_target(&mirror_target); 1274 r = dm_register_target(&mirror_target);
1275 if (r < 0) { 1275 if (r < 0) {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5281e0094072..91c7aa1fed0e 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,7 +40,7 @@
40#define SNAPSHOT_PAGES 256 40#define SNAPSHOT_PAGES 256
41 41
42struct workqueue_struct *ksnapd; 42struct workqueue_struct *ksnapd;
43static void flush_queued_bios(void *data); 43static void flush_queued_bios(struct work_struct *work);
44 44
45struct pending_exception { 45struct pending_exception {
46 struct exception e; 46 struct exception e;
@@ -528,7 +528,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
528 } 528 }
529 529
530 bio_list_init(&s->queued_bios); 530 bio_list_init(&s->queued_bios);
531 INIT_WORK(&s->queued_bios_work, flush_queued_bios, s); 531 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
532 532
533 /* Add snapshot to the list of snapshots for this origin */ 533 /* Add snapshot to the list of snapshots for this origin */
534 /* Exceptions aren't triggered till snapshot_resume() is called */ 534 /* Exceptions aren't triggered till snapshot_resume() is called */
@@ -603,9 +603,10 @@ static void flush_bios(struct bio *bio)
603 } 603 }
604} 604}
605 605
606static void flush_queued_bios(void *data) 606static void flush_queued_bios(struct work_struct *work)
607{ 607{
608 struct dm_snapshot *s = (struct dm_snapshot *) data; 608 struct dm_snapshot *s =
609 container_of(work, struct dm_snapshot, queued_bios_work);
609 struct bio *queued_bios; 610 struct bio *queued_bios;
610 unsigned long flags; 611 unsigned long flags;
611 612
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index f1db6eff4857..b3c01496c737 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -417,7 +417,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
417/* 417/*
418 * kcopyd does this every time it's woken up. 418 * kcopyd does this every time it's woken up.
419 */ 419 */
420static void do_work(void *ignored) 420static void do_work(struct work_struct *ignored)
421{ 421{
422 /* 422 /*
423 * The order that these are called is *very* important. 423 * The order that these are called is *very* important.
@@ -628,7 +628,7 @@ static int kcopyd_init(void)
628 } 628 }
629 629
630 kcopyd_clients++; 630 kcopyd_clients++;
631 INIT_WORK(&_kcopyd_work, do_work, NULL); 631 INIT_WORK(&_kcopyd_work, do_work);
632 mutex_unlock(&kcopyd_init_lock); 632 mutex_unlock(&kcopyd_init_lock);
633 return 0; 633 return 0;
634} 634}
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
index 06893243f3d4..6e166801505d 100644
--- a/drivers/media/dvb/b2c2/flexcop-pci.c
+++ b/drivers/media/dvb/b2c2/flexcop-pci.c
@@ -63,7 +63,7 @@ struct flexcop_pci {
63 63
64 unsigned long last_irq; 64 unsigned long last_irq;
65 65
66 struct work_struct irq_check_work; 66 struct delayed_work irq_check_work;
67 67
68 struct flexcop_device *fc_dev; 68 struct flexcop_device *fc_dev;
69}; 69};
@@ -97,9 +97,10 @@ static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc, flexcop_ibi_regi
97 return 0; 97 return 0;
98} 98}
99 99
100static void flexcop_pci_irq_check_work(void *data) 100static void flexcop_pci_irq_check_work(struct work_struct *work)
101{ 101{
102 struct flexcop_pci *fc_pci = data; 102 struct flexcop_pci *fc_pci =
103 container_of(work, struct flexcop_pci, irq_check_work.work);
103 struct flexcop_device *fc = fc_pci->fc_dev; 104 struct flexcop_device *fc = fc_pci->fc_dev;
104 105
105 flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); 106 flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
@@ -371,7 +372,7 @@ static int flexcop_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
371 if ((ret = flexcop_pci_dma_init(fc_pci)) != 0) 372 if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
372 goto err_fc_exit; 373 goto err_fc_exit;
373 374
374 INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci); 375 INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
375 376
376 return ret; 377 return ret;
377 378
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index 8a7dd507cf6e..206c13e47a06 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -128,7 +128,7 @@ struct cinergyt2 {
128 128
129 struct dvbt_set_parameters_msg param; 129 struct dvbt_set_parameters_msg param;
130 struct dvbt_get_status_msg status; 130 struct dvbt_get_status_msg status;
131 struct work_struct query_work; 131 struct delayed_work query_work;
132 132
133 wait_queue_head_t poll_wq; 133 wait_queue_head_t poll_wq;
134 int pending_fe_events; 134 int pending_fe_events;
@@ -142,7 +142,7 @@ struct cinergyt2 {
142#ifdef ENABLE_RC 142#ifdef ENABLE_RC
143 struct input_dev *rc_input_dev; 143 struct input_dev *rc_input_dev;
144 char phys[64]; 144 char phys[64];
145 struct work_struct rc_query_work; 145 struct delayed_work rc_query_work;
146 int rc_input_event; 146 int rc_input_event;
147 u32 rc_last_code; 147 u32 rc_last_code;
148 unsigned long last_event_jiffies; 148 unsigned long last_event_jiffies;
@@ -723,9 +723,10 @@ static struct dvb_device cinergyt2_fe_template = {
723 723
724#ifdef ENABLE_RC 724#ifdef ENABLE_RC
725 725
726static void cinergyt2_query_rc (void *data) 726static void cinergyt2_query_rc (struct work_struct *work)
727{ 727{
728 struct cinergyt2 *cinergyt2 = data; 728 struct cinergyt2 *cinergyt2 =
729 container_of(work, struct cinergyt2, rc_query_work.work);
729 char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS }; 730 char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
730 struct cinergyt2_rc_event rc_events[12]; 731 struct cinergyt2_rc_event rc_events[12];
731 int n, len, i; 732 int n, len, i;
@@ -806,7 +807,7 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2)
806 strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys)); 807 strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
807 cinergyt2->rc_input_event = KEY_MAX; 808 cinergyt2->rc_input_event = KEY_MAX;
808 cinergyt2->rc_last_code = ~0; 809 cinergyt2->rc_last_code = ~0;
809 INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2); 810 INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
810 811
811 input_dev->name = DRIVER_NAME " remote control"; 812 input_dev->name = DRIVER_NAME " remote control";
812 input_dev->phys = cinergyt2->phys; 813 input_dev->phys = cinergyt2->phys;
@@ -847,9 +848,10 @@ static inline void cinergyt2_resume_rc(struct cinergyt2 *cinergyt2) { }
847 848
848#endif /* ENABLE_RC */ 849#endif /* ENABLE_RC */
849 850
850static void cinergyt2_query (void *data) 851static void cinergyt2_query (struct work_struct *work)
851{ 852{
852 struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data; 853 struct cinergyt2 *cinergyt2 =
854 container_of(work, struct cinergyt2, query_work.work);
853 char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS }; 855 char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
854 struct dvbt_get_status_msg *s = &cinergyt2->status; 856 struct dvbt_get_status_msg *s = &cinergyt2->status;
855 uint8_t lock_bits; 857 uint8_t lock_bits;
@@ -893,7 +895,7 @@ static int cinergyt2_probe (struct usb_interface *intf,
893 895
894 mutex_init(&cinergyt2->sem); 896 mutex_init(&cinergyt2->sem);
895 init_waitqueue_head (&cinergyt2->poll_wq); 897 init_waitqueue_head (&cinergyt2->poll_wq);
896 INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2); 898 INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
897 899
898 cinergyt2->udev = interface_to_usbdev(intf); 900 cinergyt2->udev = interface_to_usbdev(intf);
899 cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; 901 cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8859ab74f0fe..ebf4dc5190f6 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -127,6 +127,7 @@ struct dvb_net_priv {
127 int in_use; 127 int in_use;
128 struct net_device_stats stats; 128 struct net_device_stats stats;
129 u16 pid; 129 u16 pid;
130 struct net_device *net;
130 struct dvb_net *host; 131 struct dvb_net *host;
131 struct dmx_demux *demux; 132 struct dmx_demux *demux;
132 struct dmx_section_feed *secfeed; 133 struct dmx_section_feed *secfeed;
@@ -1123,10 +1124,11 @@ static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc)
1123} 1124}
1124 1125
1125 1126
1126static void wq_set_multicast_list (void *data) 1127static void wq_set_multicast_list (struct work_struct *work)
1127{ 1128{
1128 struct net_device *dev = data; 1129 struct dvb_net_priv *priv =
1129 struct dvb_net_priv *priv = dev->priv; 1130 container_of(work, struct dvb_net_priv, set_multicast_list_wq);
1131 struct net_device *dev = priv->net;
1130 1132
1131 dvb_net_feed_stop(dev); 1133 dvb_net_feed_stop(dev);
1132 priv->rx_mode = RX_MODE_UNI; 1134 priv->rx_mode = RX_MODE_UNI;
@@ -1167,9 +1169,11 @@ static void dvb_net_set_multicast_list (struct net_device *dev)
1167} 1169}
1168 1170
1169 1171
1170static void wq_restart_net_feed (void *data) 1172static void wq_restart_net_feed (struct work_struct *work)
1171{ 1173{
1172 struct net_device *dev = data; 1174 struct dvb_net_priv *priv =
1175 container_of(work, struct dvb_net_priv, restart_net_feed_wq);
1176 struct net_device *dev = priv->net;
1173 1177
1174 if (netif_running(dev)) { 1178 if (netif_running(dev)) {
1175 dvb_net_feed_stop(dev); 1179 dvb_net_feed_stop(dev);
@@ -1276,6 +1280,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
1276 dvbnet->device[if_num] = net; 1280 dvbnet->device[if_num] = net;
1277 1281
1278 priv = net->priv; 1282 priv = net->priv;
1283 priv->net = net;
1279 priv->demux = dvbnet->demux; 1284 priv->demux = dvbnet->demux;
1280 priv->pid = pid; 1285 priv->pid = pid;
1281 priv->rx_mode = RX_MODE_UNI; 1286 priv->rx_mode = RX_MODE_UNI;
@@ -1284,8 +1289,8 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
1284 priv->feedtype = feedtype; 1289 priv->feedtype = feedtype;
1285 reset_ule(priv); 1290 reset_ule(priv);
1286 1291
1287 INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net); 1292 INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
1288 INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net); 1293 INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
1289 mutex_init(&priv->mutex); 1294 mutex_init(&priv->mutex);
1290 1295
1291 net->base_addr = pid; 1296 net->base_addr = pid;
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 0a3a0b6c2350..794e4471561c 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -13,9 +13,10 @@
13 * 13 *
14 * TODO: Fix the repeat rate of the input device. 14 * TODO: Fix the repeat rate of the input device.
15 */ 15 */
16static void dvb_usb_read_remote_control(void *data) 16static void dvb_usb_read_remote_control(struct work_struct *work)
17{ 17{
18 struct dvb_usb_device *d = data; 18 struct dvb_usb_device *d =
19 container_of(work, struct dvb_usb_device, rc_query_work.work);
19 u32 event; 20 u32 event;
20 int state; 21 int state;
21 22
@@ -128,7 +129,7 @@ int dvb_usb_remote_init(struct dvb_usb_device *d)
128 129
129 input_register_device(d->rc_input_dev); 130 input_register_device(d->rc_input_dev);
130 131
131 INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d); 132 INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
132 133
133 info("schedule remote query interval to %d msecs.", d->props.rc_interval); 134 info("schedule remote query interval to %d msecs.", d->props.rc_interval);
134 schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval)); 135 schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 376c45a8e779..0d721731a524 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -369,7 +369,7 @@ struct dvb_usb_device {
369 /* remote control */ 369 /* remote control */
370 struct input_dev *rc_input_dev; 370 struct input_dev *rc_input_dev;
371 char rc_phys[64]; 371 char rc_phys[64];
372 struct work_struct rc_query_work; 372 struct delayed_work rc_query_work;
373 u32 last_event; 373 u32 last_event;
374 int last_state; 374 int last_state;
375 375
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index 41f4b8d17559..b12cec94f4cc 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -82,6 +82,8 @@ struct pp_cam_entry {
82 struct pardevice *pdev; 82 struct pardevice *pdev;
83 struct parport *port; 83 struct parport *port;
84 struct work_struct cb_task; 84 struct work_struct cb_task;
85 void (*cb_func)(void *cbdata);
86 void *cb_data;
85 int open_count; 87 int open_count;
86 wait_queue_head_t wq_stream; 88 wait_queue_head_t wq_stream;
87 /* image state flags */ 89 /* image state flags */
@@ -130,6 +132,20 @@ static void cpia_parport_disable_irq( struct parport *port ) {
130#define PARPORT_CHUNK_SIZE PAGE_SIZE 132#define PARPORT_CHUNK_SIZE PAGE_SIZE
131 133
132 134
135static void cpia_pp_run_callback(struct work_struct *work)
136{
137 void (*cb_func)(void *cbdata);
138 void *cb_data;
139 struct pp_cam_entry *cam;
140
141 cam = container_of(work, struct pp_cam_entry, cb_task);
142 cb_func = cam->cb_func;
143 cb_data = cam->cb_data;
144 work_release(work);
145
146 cb_func(cb_data);
147}
148
133/**************************************************************************** 149/****************************************************************************
134 * 150 *
135 * CPiA-specific low-level parport functions for nibble uploads 151 * CPiA-specific low-level parport functions for nibble uploads
@@ -664,7 +680,9 @@ static int cpia_pp_registerCallback(void *privdata, void (*cb)(void *cbdata), vo
664 int retval = 0; 680 int retval = 0;
665 681
666 if(cam->port->irq != PARPORT_IRQ_NONE) { 682 if(cam->port->irq != PARPORT_IRQ_NONE) {
667 INIT_WORK(&cam->cb_task, cb, cbdata); 683 cam->cb_func = cb;
684 cam->cb_data = cbdata;
685 INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback);
668 } else { 686 } else {
669 retval = -1; 687 retval = -1;
670 } 688 }
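
The cpia_pp hunk keeps an older callback-style interface (a void (*cb)(void *cbdata) registered by callers) working on top of the new API by stashing the function pointer and its argument in the device structure and dispatching from a small trampoline handler. A simplified sketch of that shape with hypothetical names (qux_cam, qux_run_callback), omitting the INIT_WORK_NAR()/work_release() handling the actual hunk uses:

#include <linux/workqueue.h>

struct qux_cam {
	struct work_struct cb_task;
	void (*cb_func)(void *cbdata);	/* legacy callback to invoke */
	void *cb_data;			/* its opaque argument */
};

/* Trampoline: recover the camera, then call the stored callback. */
static void qux_run_callback(struct work_struct *work)
{
	struct qux_cam *cam = container_of(work, struct qux_cam, cb_task);

	cam->cb_func(cam->cb_data);
}

static void qux_register_callback(struct qux_cam *cam,
				  void (*cb)(void *cbdata), void *cbdata)
{
	cam->cb_func = cb;
	cam->cb_data = cbdata;
	INIT_WORK(&cam->cb_task, qux_run_callback);
	schedule_work(&cam->cb_task);
}
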
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 57e1c024a547..e60a0a52e4b2 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -145,9 +145,9 @@ static void ir_timer(unsigned long data)
145 schedule_work(&ir->work); 145 schedule_work(&ir->work);
146} 146}
147 147
148static void cx88_ir_work(void *data) 148static void cx88_ir_work(struct work_struct *work)
149{ 149{
150 struct cx88_IR *ir = data; 150 struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
151 unsigned long timeout; 151 unsigned long timeout;
152 152
153 cx88_ir_handle_key(ir); 153 cx88_ir_handle_key(ir);
@@ -308,7 +308,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
308 core->ir = ir; 308 core->ir = ir;
309 309
310 if (ir->polling) { 310 if (ir->polling) {
311 INIT_WORK(&ir->work, cx88_ir_work, ir); 311 INIT_WORK(&ir->work, cx88_ir_work);
312 init_timer(&ir->timer); 312 init_timer(&ir->timer);
313 ir->timer.function = ir_timer; 313 ir->timer.function = ir_timer;
314 ir->timer.data = (unsigned long)ir; 314 ir->timer.data = (unsigned long)ir;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 1457b1602221..ab87e7bfe84f 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -268,9 +268,9 @@ static void ir_timer(unsigned long data)
268 schedule_work(&ir->work); 268 schedule_work(&ir->work);
269} 269}
270 270
271static void ir_work(void *data) 271static void ir_work(struct work_struct *work)
272{ 272{
273 struct IR_i2c *ir = data; 273 struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
274 ir_key_poll(ir); 274 ir_key_poll(ir);
275 mod_timer(&ir->timer, jiffies+HZ/10); 275 mod_timer(&ir->timer, jiffies+HZ/10);
276} 276}
@@ -400,7 +400,7 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
400 ir->input->name,ir->input->phys,adap->name); 400 ir->input->name,ir->input->phys,adap->name);
401 401
402 /* start polling via eventd */ 402 /* start polling via eventd */
403 INIT_WORK(&ir->work, ir_work, ir); 403 INIT_WORK(&ir->work, ir_work);
404 init_timer(&ir->timer); 404 init_timer(&ir->timer);
405 ir->timer.function = ir_timer; 405 ir->timer.function = ir_timer;
406 ir->timer.data = (unsigned long)ir; 406 ir->timer.data = (unsigned long)ir;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
index f129f316d20e..cf129746205d 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -45,16 +45,21 @@ static void pvr2_context_trigger_poll(struct pvr2_context *mp)
45} 45}
46 46
47 47
48static void pvr2_context_poll(struct pvr2_context *mp) 48static void pvr2_context_poll(struct work_struct *work)
49{ 49{
50 struct pvr2_context *mp =
51 container_of(work, struct pvr2_context, workpoll);
50 pvr2_context_enter(mp); do { 52 pvr2_context_enter(mp); do {
51 pvr2_hdw_poll(mp->hdw); 53 pvr2_hdw_poll(mp->hdw);
52 } while (0); pvr2_context_exit(mp); 54 } while (0); pvr2_context_exit(mp);
53} 55}
54 56
55 57
56static void pvr2_context_setup(struct pvr2_context *mp) 58static void pvr2_context_setup(struct work_struct *work)
57{ 59{
60 struct pvr2_context *mp =
61 container_of(work, struct pvr2_context, workinit);
62
58 pvr2_context_enter(mp); do { 63 pvr2_context_enter(mp); do {
59 if (!pvr2_hdw_dev_ok(mp->hdw)) break; 64 if (!pvr2_hdw_dev_ok(mp->hdw)) break;
60 pvr2_hdw_setup(mp->hdw); 65 pvr2_hdw_setup(mp->hdw);
@@ -92,8 +97,8 @@ struct pvr2_context *pvr2_context_create(
92 } 97 }
93 98
94 mp->workqueue = create_singlethread_workqueue("pvrusb2"); 99 mp->workqueue = create_singlethread_workqueue("pvrusb2");
95 INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp); 100 INIT_WORK(&mp->workinit, pvr2_context_setup);
96 INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp); 101 INIT_WORK(&mp->workpoll, pvr2_context_poll);
97 queue_work(mp->workqueue,&mp->workinit); 102 queue_work(mp->workqueue,&mp->workinit);
98 done: 103 done:
99 return mp; 104 return mp;
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 7b9859c33018..92eabf88a09b 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -324,9 +324,9 @@ static void saa6588_timer(unsigned long data)
324 schedule_work(&s->work); 324 schedule_work(&s->work);
325} 325}
326 326
327static void saa6588_work(void *data) 327static void saa6588_work(struct work_struct *work)
328{ 328{
329 struct saa6588 *s = (struct saa6588 *)data; 329 struct saa6588 *s = container_of(work, struct saa6588, work);
330 330
331 saa6588_i2c_poll(s); 331 saa6588_i2c_poll(s);
332 mod_timer(&s->timer, jiffies + msecs_to_jiffies(20)); 332 mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
@@ -419,7 +419,7 @@ static int saa6588_attach(struct i2c_adapter *adap, int addr, int kind)
419 saa6588_configure(s); 419 saa6588_configure(s);
420 420
421 /* start polling via eventd */ 421 /* start polling via eventd */
422 INIT_WORK(&s->work, saa6588_work, s); 422 INIT_WORK(&s->work, saa6588_work);
423 init_timer(&s->timer); 423 init_timer(&s->timer);
424 s->timer.function = saa6588_timer; 424 s->timer.function = saa6588_timer;
425 s->timer.data = (unsigned long)s; 425 s->timer.data = (unsigned long)s;
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 65d044086ce9..daaae870a2c4 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -343,9 +343,10 @@ static struct video_device saa7134_empress_template =
343 .minor = -1, 343 .minor = -1,
344}; 344};
345 345
346static void empress_signal_update(void* data) 346static void empress_signal_update(struct work_struct *work)
347{ 347{
348 struct saa7134_dev* dev = (struct saa7134_dev*) data; 348 struct saa7134_dev* dev =
349 container_of(work, struct saa7134_dev, empress_workqueue);
349 350
350 if (dev->nosignal) { 351 if (dev->nosignal) {
351 dprintk("no video signal\n"); 352 dprintk("no video signal\n");
@@ -378,7 +379,7 @@ static int empress_init(struct saa7134_dev *dev)
378 "%s empress (%s)", dev->name, 379 "%s empress (%s)", dev->name,
379 saa7134_boards[dev->board].name); 380 saa7134_boards[dev->board].name);
380 381
381 INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev); 382 INIT_WORK(&dev->empress_workqueue, empress_signal_update);
382 383
383 err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, 384 err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
384 empress_nr[dev->nr]); 385 empress_nr[dev->nr]);
@@ -399,7 +400,7 @@ static int empress_init(struct saa7134_dev *dev)
399 sizeof(struct saa7134_buf), 400 sizeof(struct saa7134_buf),
400 dev); 401 dev);
401 402
402 empress_signal_update(dev); 403 empress_signal_update(&dev->empress_workqueue);
403 return 0; 404 return 0;
404} 405}
405 406
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 1dd491773150..ef2b55e19910 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1018,9 +1018,10 @@ mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
1018} 1018}
1019 1019
1020static void 1020static void
1021mptfc_setup_reset(void *arg) 1021mptfc_setup_reset(struct work_struct *work)
1022{ 1022{
1023 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 1023 MPT_ADAPTER *ioc =
1024 container_of(work, MPT_ADAPTER, fc_setup_reset_work);
1024 u64 pn; 1025 u64 pn;
1025 struct mptfc_rport_info *ri; 1026 struct mptfc_rport_info *ri;
1026 1027
@@ -1043,9 +1044,10 @@ mptfc_setup_reset(void *arg)
1043} 1044}
1044 1045
1045static void 1046static void
1046mptfc_rescan_devices(void *arg) 1047mptfc_rescan_devices(struct work_struct *work)
1047{ 1048{
1048 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 1049 MPT_ADAPTER *ioc =
1050 container_of(work, MPT_ADAPTER, fc_rescan_work);
1049 int ii; 1051 int ii;
1050 u64 pn; 1052 u64 pn;
1051 struct mptfc_rport_info *ri; 1053 struct mptfc_rport_info *ri;
@@ -1154,8 +1156,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1154 } 1156 }
1155 1157
1156 spin_lock_init(&ioc->fc_rescan_work_lock); 1158 spin_lock_init(&ioc->fc_rescan_work_lock);
1157 INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc); 1159 INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
1158 INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc); 1160 INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
1159 1161
1160 spin_lock_irqsave(&ioc->FreeQlock, flags); 1162 spin_lock_irqsave(&ioc->FreeQlock, flags);
1161 1163
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 314c3a27585d..b7c4407c5e3f 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -111,7 +111,8 @@ struct mpt_lan_priv {
111 u32 total_received; 111 u32 total_received;
112 struct net_device_stats stats; /* Per device statistics */ 112 struct net_device_stats stats; /* Per device statistics */
113 113
114 struct work_struct post_buckets_task; 114 struct delayed_work post_buckets_task;
115 struct net_device *dev;
115 unsigned long post_buckets_active; 116 unsigned long post_buckets_active;
116}; 117};
117 118
@@ -132,7 +133,7 @@ static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
132static int mpt_lan_open(struct net_device *dev); 133static int mpt_lan_open(struct net_device *dev);
133static int mpt_lan_reset(struct net_device *dev); 134static int mpt_lan_reset(struct net_device *dev);
134static int mpt_lan_close(struct net_device *dev); 135static int mpt_lan_close(struct net_device *dev);
135static void mpt_lan_post_receive_buckets(void *dev_id); 136static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
136static void mpt_lan_wake_post_buckets_task(struct net_device *dev, 137static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
137 int priority); 138 int priority);
138static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); 139static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
@@ -345,7 +346,7 @@ mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
 	} else {
-		mpt_lan_post_receive_buckets(dev);
+		mpt_lan_post_receive_buckets(priv);
 		netif_wake_queue(dev);
 	}
 
@@ -441,7 +442,7 @@ mpt_lan_open(struct net_device *dev)
 
 	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
 
-	mpt_lan_post_receive_buckets(dev);
+	mpt_lan_post_receive_buckets(priv);
 	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev));
 
@@ -854,7 +855,7 @@ mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
 
 	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
 		if (priority) {
-			schedule_work(&priv->post_buckets_task);
+			schedule_delayed_work(&priv->post_buckets_task, 0);
 		} else {
 			schedule_delayed_work(&priv->post_buckets_task, 1);
 			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
@@ -1188,10 +1189,9 @@ mpt_lan_receive_post_reply(struct net_device *dev,
 /* Simple SGE's only at the moment */
 
 static void
-mpt_lan_post_receive_buckets(void *dev_id)
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 {
-	struct net_device *dev = dev_id;
-	struct mpt_lan_priv *priv = dev->priv;
+	struct net_device *dev = priv->dev;
 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 	MPT_FRAME_HDR *mf;
 	LANReceivePostRequest_t *pRecvReq;
@@ -1335,6 +1335,13 @@ out:
 	clear_bit(0, &priv->post_buckets_active);
 }
 
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+						  post_buckets_task.work));
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static struct net_device *
 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1350,11 +1357,13 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
 
 	priv = netdev_priv(dev);
 
+	priv->dev = dev;
 	priv->mpt_dev = mpt_dev;
 	priv->pnum = pnum;
 
-	memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
-	INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+	memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
+	INIT_DELAYED_WORK(&priv->post_buckets_task,
+			  mpt_lan_post_receive_buckets_work);
 	priv->post_buckets_active = 0;
 
 	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
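mptlan's post_buckets_task additionally changes type from work_struct to delayed_work because it is queued with schedule_delayed_work(); with the split types, the handler's container_of() has to go through the embedded .work member. A hedged sketch of that delayed variant (illustrative names only):

#include <linux/workqueue.h>

struct demo_priv {
	struct delayed_work poll_task;	/* was: struct work_struct */
};

static void demo_poll(struct work_struct *work)
{
	/* A delayed_work embeds a work_struct member named "work". */
	struct demo_priv *priv =
		container_of(work, struct demo_priv, poll_task.work);

	/* ... do the periodic work, then re-arm one jiffy out, as mpt_lan does ... */
	schedule_delayed_work(&priv->poll_task, 1);
}

static void demo_start(struct demo_priv *priv)
{
	INIT_DELAYED_WORK(&priv->poll_task, demo_poll);
	schedule_delayed_work(&priv->poll_task, 0);	/* run as soon as possible */
}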
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b752a479f6db..4f0c530e47b0 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2006,9 +2006,10 @@ __mptsas_discovery_work(MPT_ADAPTER *ioc)
  *(Mutex LOCKED)
  */
 static void
-mptsas_discovery_work(void * arg)
+mptsas_discovery_work(struct work_struct *work)
 {
-	struct mptsas_discovery_event *ev = arg;
+	struct mptsas_discovery_event *ev =
+		container_of(work, struct mptsas_discovery_event, work);
 	MPT_ADAPTER *ioc = ev->ioc;
 
 	mutex_lock(&ioc->sas_discovery_mutex);
@@ -2068,9 +2069,9 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id)
  * Work queue thread to clear the persitency table
  */
 static void
-mptsas_persist_clear_table(void * arg)
+mptsas_persist_clear_table(struct work_struct *work)
 {
-	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+	MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
 
 	mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
 }
@@ -2093,9 +2094,10 @@ mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(void *arg)
+mptsas_hotplug_work(struct work_struct *work)
 {
-	struct mptsas_hotplug_event *ev = arg;
+	struct mptsas_hotplug_event *ev =
+		container_of(work, struct mptsas_hotplug_event, work);
 	MPT_ADAPTER *ioc = ev->ioc;
 	struct mptsas_phyinfo *phy_info;
 	struct sas_rphy *rphy;
@@ -2341,7 +2343,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc,
 		break;
 	}
 
-	INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+	INIT_WORK(&ev->work, mptsas_hotplug_work);
 	ev->ioc = ioc;
 	ev->handle = le16_to_cpu(sas_event_data->DevHandle);
 	ev->parent_handle =
@@ -2366,7 +2368,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc,
 		 * Persistent table is full.
 		 */
 		INIT_WORK(&ioc->sas_persist_task,
-		    mptsas_persist_clear_table, (void *)ioc);
+		    mptsas_persist_clear_table);
 		schedule_work(&ioc->sas_persist_task);
 		break;
 	case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
@@ -2395,7 +2397,7 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
 		return;
 	}
 
-	INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+	INIT_WORK(&ev->work, mptsas_hotplug_work);
 	ev->ioc = ioc;
 	ev->id = raid_event_data->VolumeID;
 	ev->event_type = MPTSAS_IGNORE_EVENT;
@@ -2474,7 +2476,7 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
 	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 	if (!ev)
 		return;
-	INIT_WORK(&ev->work, mptsas_discovery_work, ev);
+	INIT_WORK(&ev->work, mptsas_discovery_work);
 	ev->ioc = ioc;
 	schedule_work(&ev->work);
 };
@@ -2511,8 +2513,7 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
 		break;
 	case MPI_EVENT_PERSISTENT_TABLE_FULL:
 		INIT_WORK(&ioc->sas_persist_task,
-		    mptsas_persist_clear_table,
-		    (void *)ioc);
+		    mptsas_persist_clear_table);
 		schedule_work(&ioc->sas_persist_task);
 		break;
 	case MPI_EVENT_SAS_DISCOVERY:
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e4cc3dd5fc9f..f422c0d0621c 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -646,9 +646,10 @@ struct work_queue_wrapper {
 	int disk;
 };
 
-static void mpt_work_wrapper(void *data)
+static void mpt_work_wrapper(struct work_struct *work)
 {
-	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+	struct work_queue_wrapper *wqw =
+		container_of(work, struct work_queue_wrapper, work);
 	struct _MPT_SCSI_HOST *hd = wqw->hd;
 	struct Scsi_Host *shost = hd->ioc->sh;
 	struct scsi_device *sdev;
@@ -695,7 +696,7 @@ static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk)
 			    disk);
 		return;
 	}
-	INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
+	INIT_WORK(&wqw->work, mpt_work_wrapper);
 	wqw->hd = hd;
 	wqw->disk = disk;
 
@@ -784,9 +785,10 @@ MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
  * renegotiate for a given target
  */
 static void
-mptspi_dv_renegotiate_work(void *data)
+mptspi_dv_renegotiate_work(struct work_struct *work)
 {
-	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+	struct work_queue_wrapper *wqw =
+		container_of(work, struct work_queue_wrapper, work);
 	struct _MPT_SCSI_HOST *hd = wqw->hd;
 	struct scsi_device *sdev;
 
@@ -804,7 +806,7 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
 	if (!wqw)
 		return;
 
-	INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+	INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
 	wqw->hd = hd;
 
 	schedule_work(&wqw->work);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 64130227574f..7fc7399bd2ec 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -232,7 +232,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
 			break;
 		}
 
-		INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+		INIT_WORK(&evt->work, drv->event);
 		queue_work(drv->event_queue, &evt->work);
 		return 1;
 	}
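The i2o change above is worth calling out: the old code forced drv->event into INIT_WORK() with a (void (*)(void *)) cast, i.e. it called handlers through a mismatched function-pointer type. Because every handler now has the struct work_struct * prototype, the cast simply disappears. A small sketch of the resulting shape (the demo_* names are hypothetical, not the i2o API):

#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_event {
	struct work_struct work;
	u32 data;
};

struct demo_driver {
	/* Event handlers are declared with the real work prototype... */
	void (*event)(struct work_struct *work);
	struct workqueue_struct *event_queue;
};

static int demo_dispatch(struct demo_driver *drv, struct demo_event *evt)
{
	/* ...so queueing them needs no function-pointer cast at all. */
	INIT_WORK(&evt->work, drv->event);
	return queue_work(drv->event_queue, &evt->work);
}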
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index a2350640384b..9e529d8dd5cb 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -371,8 +371,10 @@ static int i2o_exec_remove(struct device *dev)
  * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY
  * again, otherwise send LCT NOTIFY to get informed on next LCT change.
  */
-static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work)
+static void i2o_exec_lct_modified(struct work_struct *_work)
 {
+	struct i2o_exec_lct_notify_work *work =
+		container_of(_work, struct i2o_exec_lct_notify_work, work);
 	u32 change_ind = 0;
 	struct i2o_controller *c = work->c;
 
@@ -439,8 +441,7 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
 
 		work->c = c;
 
-		INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified,
-			  work);
+		INIT_WORK(&work->work, i2o_exec_lct_modified);
 		queue_work(i2o_exec_driver.event_queue, &work->work);
 		return 1;
 	}
@@ -460,13 +461,15 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
 
 /**
  * i2o_exec_event - Event handling function
- * @evt: Event which occurs
+ * @work: Work item in occurring event
  *
  * Handles events send by the Executive device. At the moment does not do
  * anything useful.
  */
-static void i2o_exec_event(struct i2o_event *evt)
+static void i2o_exec_event(struct work_struct *work)
 {
+	struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
 	if (likely(evt->i2o_dev))
 		osm_debug("Event received from device: %d\n",
 			  evt->i2o_dev->lct_data.tid);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index eaba81bf2eca..70ae00253321 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -419,16 +419,18 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
 
 /**
  * i2o_block_delayed_request_fn - delayed request queue function
- * delayed_request: the delayed request with the queue to start
+ * @work: the delayed request with the queue to start
  *
  * If the request queue is stopped for a disk, and there is no open
  * request, a new event is created, which calls this function to start
  * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
  * be started again.
  */
-static void i2o_block_delayed_request_fn(void *delayed_request)
+static void i2o_block_delayed_request_fn(struct work_struct *work)
 {
-	struct i2o_block_delayed_request *dreq = delayed_request;
+	struct i2o_block_delayed_request *dreq =
+		container_of(work, struct i2o_block_delayed_request,
+			     work.work);
 	struct request_queue *q = dreq->queue;
 	unsigned long flags;
 
@@ -538,8 +540,9 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
 	return 1;
 };
 
-static void i2o_block_event(struct i2o_event *evt)
+static void i2o_block_event(struct work_struct *work)
 {
+	struct i2o_event *evt = container_of(work, struct i2o_event, work);
 	osm_debug("event received\n");
 	kfree(evt);
 };
@@ -938,8 +941,8 @@ static void i2o_block_request_fn(struct request_queue *q)
 			continue;
 
 		dreq->queue = q;
-		INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
-			  dreq);
+		INIT_DELAYED_WORK(&dreq->work,
+				  i2o_block_delayed_request_fn);
 
 		if (!queue_delayed_work(i2o_block_driver.event_queue,
 					&dreq->work,
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
index 4fdaa5bda412..d9fdc95b440d 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/message/i2o/i2o_block.h
@@ -96,7 +96,7 @@ struct i2o_block_request {
 
 /* I2O Block device delayed request */
 struct i2o_block_delayed_request {
-	struct work_struct work;
+	struct delayed_work work;
 	struct request_queue *queue;
 };
 
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 1ba8754e9383..2ab7add78f94 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -33,9 +33,10 @@ static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
 	spin_unlock_irqrestore(&fm->lock, flags);
 }
 
-static void tifm_7xx1_remove_media(void *adapter)
+static void tifm_7xx1_remove_media(struct work_struct *work)
 {
-	struct tifm_adapter *fm = adapter;
+	struct tifm_adapter *fm =
+		container_of(work, struct tifm_adapter, media_remover);
 	unsigned long flags;
 	int cnt;
 	struct tifm_dev *sock;
@@ -169,9 +170,10 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
 	return base_addr + ((sock_num + 1) << 10);
 }
 
-static void tifm_7xx1_insert_media(void *adapter)
+static void tifm_7xx1_insert_media(struct work_struct *work)
 {
-	struct tifm_adapter *fm = adapter;
+	struct tifm_adapter *fm =
+		container_of(work, struct tifm_adapter, media_inserter);
 	unsigned long flags;
 	tifm_media_id media_id;
 	char *card_name = "xx";
@@ -261,7 +263,7 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
 	spin_unlock_irqrestore(&fm->lock, flags);
 	flush_workqueue(fm->wq);
 
-	tifm_7xx1_remove_media(fm);
+	tifm_7xx1_remove_media(&fm->media_remover);
 
 	pci_set_power_state(dev, PCI_D3hot);
 	pci_disable_device(dev);
@@ -328,8 +330,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
 	if (!fm->sockets)
 		goto err_out_free;
 
-	INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm);
-	INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm);
+	INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
+	INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
 	fm->eject = tifm_7xx1_eject;
 	pci_set_drvdata(dev, fm);
 
@@ -384,7 +386,7 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
 
 	flush_workqueue(fm->wq);
 
-	tifm_7xx1_remove_media(fm);
+	tifm_7xx1_remove_media(&fm->media_remover);
 
 	writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
 	free_irq(dev->irq, fm);
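tifm_7xx1's suspend and remove paths call the handler synchronously; with the new prototype those direct calls have to pass the embedded work item rather than the adapter, and the handler digs the adapter back out itself. A sketch of that calling convention under the same assumptions as above (illustrative names):

#include <linux/workqueue.h>

struct demo_adapter {
	struct work_struct media_remover;
};

static void demo_remove_media(struct work_struct *work)
{
	struct demo_adapter *fm =
		container_of(work, struct demo_adapter, media_remover);

	/* ... tear down the sockets hanging off fm ... */
}

static void demo_shutdown(struct demo_adapter *fm)
{
	/* Synchronous call sites now hand over the work item, not the adapter. */
	demo_remove_media(&fm->media_remover);
}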
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 9d190022a490..6f2a282e2b97 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -1419,18 +1419,16 @@ static void mmc_setup(struct mmc_host *host)
  */
 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
 {
-	if (delay)
-		mmc_schedule_delayed_work(&host->detect, delay);
-	else
-		mmc_schedule_work(&host->detect);
+	mmc_schedule_delayed_work(&host->detect, delay);
 }
 
 EXPORT_SYMBOL(mmc_detect_change);
 
 
-static void mmc_rescan(void *data)
+static void mmc_rescan(struct work_struct *work)
 {
-	struct mmc_host *host = data;
+	struct mmc_host *host =
+		container_of(work, struct mmc_host, detect.work);
 	struct list_head *l, *n;
 	unsigned char power_mode;
 
@@ -1513,7 +1511,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_LIST_HEAD(&host->cards);
-	INIT_WORK(&host->detect, mmc_rescan, host);
+	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
 
 	/*
 	 * By default, hosts do not support SGIO or large requests.
@@ -1611,7 +1609,7 @@ EXPORT_SYMBOL(mmc_suspend_host);
  */
 int mmc_resume_host(struct mmc_host *host)
 {
-	mmc_rescan(host);
+	mmc_rescan(&host->detect.work);
 
 	return 0;
 }
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
index cd5e0ab3d84b..149affe0b686 100644
--- a/drivers/mmc/mmc.h
+++ b/drivers/mmc/mmc.h
@@ -20,6 +20,6 @@ void mmc_remove_host_sysfs(struct mmc_host *host);
 void mmc_free_host_sysfs(struct mmc_host *host);
 
 int mmc_schedule_work(struct work_struct *work);
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay);
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay);
 void mmc_flush_scheduled_work(void);
 #endif
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index ac5329636045..e334acd045bc 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -321,17 +321,9 @@ void mmc_free_host_sysfs(struct mmc_host *host)
 static struct workqueue_struct *workqueue;
 
 /*
- * Internal function. Schedule work in the MMC work queue.
- */
-int mmc_schedule_work(struct work_struct *work)
-{
-	return queue_work(workqueue, work);
-}
-
-/*
  * Internal function. Schedule delayed work in the MMC work queue.
  */
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 {
 	return queue_delayed_work(workqueue, work, delay);
 }
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index 0fdc55b08a6d..e846499a004c 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -99,7 +99,7 @@ struct tifm_sd {
 
 	struct mmc_request *req;
 	struct work_struct cmd_handler;
-	struct work_struct abort_handler;
+	struct delayed_work abort_handler;
 	wait_queue_head_t can_eject;
 
 	size_t written_blocks;
@@ -496,9 +496,9 @@ err_out:
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd(void *data)
+static void tifm_sd_end_cmd(struct work_struct *work)
 {
-	struct tifm_sd *host = data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	struct mmc_request *mrq;
@@ -608,9 +608,9 @@ err_out:
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd_nodma(void *data)
+static void tifm_sd_end_cmd_nodma(struct work_struct *work)
 {
-	struct tifm_sd *host = (struct tifm_sd*)data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	struct mmc_request *mrq;
@@ -661,11 +661,14 @@ static void tifm_sd_end_cmd_nodma(void *data)
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_abort(void *data)
+static void tifm_sd_abort(struct work_struct *work)
 {
+	struct tifm_sd *host =
+		container_of(work, struct tifm_sd, abort_handler.work);
+
 	printk(KERN_ERR DRIVER_NAME
 	       ": card failed to respond for a long period of time");
-	tifm_eject(((struct tifm_sd*)data)->dev);
+	tifm_eject(host->dev);
 }
 
 static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -762,9 +765,9 @@ static struct mmc_host_ops tifm_sd_ops = {
 	.get_ro = tifm_sd_ro
 };
 
-static void tifm_sd_register_host(void *data)
+static void tifm_sd_register_host(struct work_struct *work)
 {
-	struct tifm_sd *host = (struct tifm_sd*)data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	unsigned long flags;
@@ -772,8 +775,7 @@ static void tifm_sd_register_host(void *data)
 	spin_lock_irqsave(&sock->lock, flags);
 	host->flags |= HOST_REG;
 	PREPARE_WORK(&host->cmd_handler,
-		     no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
-		     data);
+		     no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
 	spin_unlock_irqrestore(&sock->lock, flags);
 	dev_dbg(&sock->dev, "adding host\n");
 	mmc_add_host(mmc);
@@ -799,8 +801,8 @@ static int tifm_sd_probe(struct tifm_dev *sock)
 	host->dev = sock;
 	host->clk_div = 61;
 	init_waitqueue_head(&host->can_eject);
-	INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host);
-	INIT_WORK(&host->abort_handler, tifm_sd_abort, host);
+	INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
+	INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
 
 	tifm_set_drvdata(sock, mmc);
 	sock->signal_irq = tifm_sd_signal_irq;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d02ed51abfcc..931028f672de 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -594,7 +594,7 @@ struct rtl8139_private {
 	u32 rx_config;
 	struct rtl_extra_stats xstats;
 
-	struct work_struct thread;
+	struct delayed_work thread;
 
 	struct mii_if_info mii;
 	unsigned int regs_len;
@@ -636,8 +636,8 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
-static void rtl8139_thread (void *_data);
-static void rtl8139_tx_timeout_task(void *_data);
+static void rtl8139_thread (struct work_struct *work);
+static void rtl8139_tx_timeout_task(struct work_struct *work);
 static const struct ethtool_ops rtl8139_ethtool_ops;
 
 /* write MMIO register, with flush */
@@ -1010,7 +1010,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 		(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
 	spin_lock_init (&tp->lock);
 	spin_lock_init (&tp->rx_lock);
-	INIT_WORK(&tp->thread, rtl8139_thread, dev);
+	INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
 	tp->mii.dev = dev;
 	tp->mii.mdio_read = mdio_read;
 	tp->mii.mdio_write = mdio_write;
@@ -1596,15 +1596,16 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
 		 RTL_R8 (Config1));
 }
 
-static void rtl8139_thread (void *_data)
+static void rtl8139_thread (struct work_struct *work)
 {
-	struct net_device *dev = _data;
-	struct rtl8139_private *tp = netdev_priv(dev);
+	struct rtl8139_private *tp =
+		container_of(work, struct rtl8139_private, thread.work);
+	struct net_device *dev = tp->mii.dev;
 	unsigned long thr_delay = next_tick;
 
 	if (tp->watchdog_fired) {
 		tp->watchdog_fired = 0;
-		rtl8139_tx_timeout_task(_data);
+		rtl8139_tx_timeout_task(work);
 	} else if (rtnl_trylock()) {
 		rtl8139_thread_iter (dev, tp, tp->mmio_addr);
 		rtnl_unlock ();
@@ -1646,10 +1647,11 @@ static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
 	/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
 }
 
-static void rtl8139_tx_timeout_task (void *_data)
+static void rtl8139_tx_timeout_task (struct work_struct *work)
 {
-	struct net_device *dev = _data;
-	struct rtl8139_private *tp = netdev_priv(dev);
+	struct rtl8139_private *tp =
+		container_of(work, struct rtl8139_private, thread.work);
+	struct net_device *dev = tp->mii.dev;
 	void __iomem *ioaddr = tp->mmio_addr;
 	int i;
 	u8 tmp8;
@@ -1695,7 +1697,7 @@ static void rtl8139_tx_timeout (struct net_device *dev)
 	struct rtl8139_private *tp = netdev_priv(dev);
 
 	if (!tp->have_thread) {
-		INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev);
+		INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task);
 		schedule_delayed_work(&tp->thread, next_tick);
 	} else
 		tp->watchdog_fired = 1;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index fc2f1d1c7ead..5bacb7587df4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4411,9 +4411,9 @@ bnx2_open(struct net_device *dev)
 }
 
 static void
-bnx2_reset_task(void *data)
+bnx2_reset_task(struct work_struct *work)
 {
-	struct bnx2 *bp = data;
+	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
 	if (!netif_running(bp->dev))
 		return;
@@ -5702,7 +5702,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->pdev = pdev;
 
 	spin_lock_init(&bp->phy_lock);
-	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
 	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index fd2cc13f7d97..c8126484c2be 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4066,9 +4066,9 @@ static int cas_alloc_rxds(struct cas *cp)
 	return 0;
 }
 
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
 {
-	struct cas *cp = (struct cas *) data;
+	struct cas *cp = container_of(work, struct cas, reset_task);
 #if 0
 	int pending = atomic_read(&cp->reset_task_pending);
 #else
@@ -5006,7 +5006,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 	atomic_set(&cp->reset_task_pending_spare, 0);
 	atomic_set(&cp->reset_task_pending_mtu, 0);
 #endif
-	INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+	INIT_WORK(&cp->reset_task, cas_reset_task);
 
 	/* Default link parameters */
 	if (link_mode >= 0 && link_mode <= 6)
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index b265941e1372..74758d2c7af8 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -279,7 +279,7 @@ struct adapter {
 	struct petp *tp;
 
 	struct port_info port[MAX_NPORTS];
-	struct work_struct stats_update_task;
+	struct delayed_work stats_update_task;
 	struct timer_list stats_update_timer;
 
 	spinlock_t tpi_lock;
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
index 60901f25014e..cf9143499882 100644
--- a/drivers/net/chelsio/cphy.h
+++ b/drivers/net/chelsio/cphy.h
@@ -91,7 +91,7 @@ struct cphy {
 	int state;	/* Link status state machine */
 	adapter_t *adapter;	/* associated adapter */
 
-	struct work_struct phy_update;
+	struct delayed_work phy_update;
 
 	u16 bmsr;
 	int count;
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 53bec6739812..de48eadddbc4 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -953,10 +953,11 @@ static void t1_netpoll(struct net_device *dev)
  * Periodic accumulation of MAC statistics. This is used only if the MAC
  * does not have any other way to prevent stats counter overflow.
  */
-static void mac_stats_task(void *data)
+static void mac_stats_task(struct work_struct *work)
 {
 	int i;
-	struct adapter *adapter = data;
+	struct adapter *adapter =
+		container_of(work, struct adapter, stats_update_task.work);
 
 	for_each_port(adapter, i) {
 		struct port_info *p = &adapter->port[i];
@@ -977,9 +978,10 @@ static void mac_stats_task(void *data)
 /*
  * Processes elmer0 external interrupts in process context.
  */
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
 {
-	struct adapter *adapter = data;
+	struct adapter *adapter =
+		container_of(work, struct adapter, ext_intr_handler_task);
 
 	t1_elmer0_ext_intr_handler(adapter);
 
@@ -1113,9 +1115,9 @@ static int __devinit init_one(struct pci_dev *pdev,
 		spin_lock_init(&adapter->mac_lock);
 
 		INIT_WORK(&adapter->ext_intr_handler_task,
-			ext_intr_task, adapter);
-		INIT_WORK(&adapter->stats_update_task, mac_stats_task,
-			adapter);
+			ext_intr_task);
+		INIT_DELAYED_WORK(&adapter->stats_update_task,
+				  mac_stats_task);
 
 		pci_set_drvdata(pdev, netdev);
 	}
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 0b90014d5b3e..c7731b6f9de3 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -93,9 +93,11 @@ static int my3126_interrupt_handler(struct cphy *cphy)
 	return cphy_cause_link_change;
 }
 
-static void my3216_poll(void *arg)
+static void my3216_poll(struct work_struct *work)
 {
-	my3126_interrupt_handler(arg);
+	struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+	my3126_interrupt_handler(cphy);
 }
 
 static int my3126_set_loopback(struct cphy *cphy, int on)
@@ -171,7 +173,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
 	if (cphy)
 		cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
 
-	INIT_WORK(&cphy->phy_update, my3216_poll, cphy);
+	INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
 	cphy->bmsr = 0;
 
 	return (cphy);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3a8df479cbda..03bf164f9e8d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2102,9 +2102,10 @@ static void e100_tx_timeout(struct net_device *netdev)
 	schedule_work(&nic->tx_timeout_task);
 }
 
-static void e100_tx_timeout_task(struct net_device *netdev)
+static void e100_tx_timeout_task(struct work_struct *work)
 {
-	struct nic *nic = netdev_priv(netdev);
+	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+	struct net_device *netdev = nic->netdev;
 
 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
 		readb(&nic->csr->scb.status));
@@ -2637,8 +2638,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 	nic->blink_timer.function = e100_blink_led;
 	nic->blink_timer.data = (unsigned long)nic;
 
-	INIT_WORK(&nic->tx_timeout_task,
-		(void (*)(void *))e100_tx_timeout_task, netdev);
+	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
 
 	if((err = e100_alloc(nic))) {
 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 32dde0adb683..73f3a85fd238 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -190,7 +190,7 @@ void e1000_set_ethtool_ops(struct net_device *netdev);
 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 static void e1000_tx_timeout(struct net_device *dev);
-static void e1000_reset_task(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
 static void e1000_smartspeed(struct e1000_adapter *adapter);
 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 				       struct sk_buff *skb);
@@ -914,8 +914,7 @@ e1000_probe(struct pci_dev *pdev,
 	adapter->phy_info_timer.function = &e1000_update_phy_info;
 	adapter->phy_info_timer.data = (unsigned long) adapter;
 
-	INIT_WORK(&adapter->reset_task,
-		(void (*)(void *))e1000_reset_task, netdev);
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
 
 	e1000_check_options(adapter);
 
@@ -3306,9 +3305,10 @@ e1000_tx_timeout(struct net_device *netdev)
 }
 
 static void
-e1000_reset_task(struct net_device *netdev)
+e1000_reset_task(struct work_struct *work)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_adapter *adapter =
+		container_of(work, struct e1000_adapter, reset_task);
 
 	e1000_reinit_locked(adapter);
 }
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 6ad696101418..83fa32f72398 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2224,11 +2224,12 @@ static int ehea_stop(struct net_device *dev)
 	return ret;
 }
 
-static void ehea_reset_port(void *data)
+static void ehea_reset_port(struct work_struct *work)
 {
 	int ret;
-	struct net_device *dev = data;
-	struct ehea_port *port = netdev_priv(dev);
+	struct ehea_port *port =
+		container_of(work, struct ehea_port, reset_task);
+	struct net_device *dev = port->netdev;
 
 	port->resets++;
 	down(&port->port_lock);
@@ -2379,7 +2380,7 @@ static int ehea_setup_single_port(struct ehea_port *port,
 	dev->tx_timeout = &ehea_tx_watchdog;
 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
-	INIT_WORK(&port->reset_task, ehea_reset_port, dev);
+	INIT_WORK(&port->reset_task, ehea_reset_port);
 
 	ehea_set_ethtool_ops(dev);
 
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 1ed9cccd3c11..3c33d6f6a6a6 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -168,8 +168,9 @@ struct baycom_state {
 	int magic;
 
 	struct pardevice *pdev;
+	struct net_device *dev;
 	unsigned int work_running;
-	struct work_struct run_work;
+	struct delayed_work run_work;
 	unsigned int modem;
 	unsigned int bitrate;
 	unsigned char stat;
@@ -659,16 +660,18 @@ static int receive(struct net_device *dev, int cnt)
 #define GETTICK(x)
 #endif /* __i386__ */
 
-static void epp_bh(struct net_device *dev)
+static void epp_bh(struct work_struct *work)
 {
+	struct net_device *dev;
 	struct baycom_state *bc;
 	struct parport *pp;
 	unsigned char stat;
 	unsigned char tmp[2];
 	unsigned int time1 = 0, time2 = 0, time3 = 0;
 	int cnt, cnt2;
 
-	bc = netdev_priv(dev);
+	bc = container_of(work, struct baycom_state, run_work.work);
+	dev = bc->dev;
 	if (!bc->work_running)
 		return;
 	baycom_int_freq(bc);
@@ -889,7 +892,7 @@ static int epp_open(struct net_device *dev)
 		return -EBUSY;
 	}
 	dev->irq = /*pp->irq*/ 0;
-	INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev);
+	INIT_DELAYED_WORK(&bc->run_work, epp_bh);
 	bc->work_running = 1;
 	bc->modem = EPP_CONVENTIONAL;
 	if (eppconfig(bc))
@@ -1213,6 +1216,7 @@ static void __init baycom_epp_dev_setup(struct net_device *dev)
 	/*
 	 * initialize part of the baycom_state struct
 	 */
+	bc->dev = dev;
 	bc->magic = BAYCOM_MAGIC;
 	bc->cfg.fclk = 19666600;
 	bc->cfg.bps = 9600;
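Several drivers in this patch (mpt_lan, baycom_epp, e100, netxen, ns83820) gain a back-pointer field for the same reason baycom does here: the old handlers were handed the net_device directly, but container_of() can only recover the private structure the work item is embedded in, so the device pointer has to be stashed when the work is set up. A sketch of that idiom with made-up names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct net_device *ndev;	/* back-pointer saved at setup time */
	struct work_struct reset_task;
};

static void demo_reset_task(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv, reset_task);
	struct net_device *ndev = priv->ndev;	/* recovered via the back-pointer */

	netif_stop_queue(ndev);
	/* ... reset the hardware ... */
	netif_wake_queue(ndev);
}

static void demo_setup(struct net_device *ndev)
{
	struct demo_priv *priv = netdev_priv(ndev);

	priv->ndev = ndev;
	INIT_WORK(&priv->reset_task, demo_reset_task);
}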
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b32c52ed19d7..f0c61f3b2a82 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -560,9 +560,9 @@ static inline int mcs_find_endpoints(struct mcs_cb *mcs,
 	return ret;
 }
 
-static void mcs_speed_work(void *arg)
+static void mcs_speed_work(struct work_struct *work)
 {
-	struct mcs_cb *mcs = arg;
+	struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
 	struct net_device *netdev = mcs->netdev;
 
 	mcs_speed_change(mcs);
@@ -927,7 +927,7 @@ static int mcs_probe(struct usb_interface *intf,
 	irda_qos_bits_to_value(&mcs->qos);
 
 	/* Speed change work initialisation*/
-	INIT_WORK(&mcs->work, mcs_speed_work, mcs);
+	INIT_WORK(&mcs->work, mcs_speed_work);
 
 	/* Override the network functions we need to use */
 	ndev->hard_start_xmit = mcs_hard_xmit;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 9fa294a546d6..2a57bc67ce35 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -22,7 +22,7 @@
 
 struct sir_fsm {
 	struct semaphore sem;
-	struct work_struct work;
+	struct delayed_work work;
 	unsigned state, substate;
 	int param;
 	int result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 3b5854d10c17..17b0c3ab6201 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -100,9 +100,9 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
  * Both must be unlocked/restarted on completion - but only on final exit.
  */
 
-static void sirdev_config_fsm(void *data)
+static void sirdev_config_fsm(struct work_struct *work)
 {
-	struct sir_dev *dev = data;
+	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
 	struct sir_fsm *fsm = &dev->fsm;
 	int next_state;
 	int ret = -1;
@@ -309,8 +309,8 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
 	fsm->param = param;
 	fsm->result = 0;
 
-	INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
-	queue_work(irda_sir_wq, &fsm->work);
+	INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+	queue_delayed_work(irda_sir_wq, &fsm->work, 0);
 	return 0;
 }
 
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 2284e2ce1692..d6f4f185bf37 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -166,7 +166,7 @@ struct veth_msg {
 
 struct veth_lpar_connection {
 	HvLpIndex remote_lp;
-	struct work_struct statemachine_wq;
+	struct delayed_work statemachine_wq;
 	struct veth_msg *msgs;
 	int num_events;
 	struct veth_cap_data local_caps;
@@ -456,7 +456,7 @@ static struct kobj_type veth_port_ktype = {
 
 static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
 {
-	schedule_work(&cnx->statemachine_wq);
+	schedule_delayed_work(&cnx->statemachine_wq, 0);
 }
 
 static void veth_take_cap(struct veth_lpar_connection *cnx,
@@ -638,9 +638,11 @@ static int veth_process_caps(struct veth_lpar_connection *cnx)
 }
 
 /* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(void *p)
+static void veth_statemachine(struct work_struct *work)
 {
-	struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p;
+	struct veth_lpar_connection *cnx =
+		container_of(work, struct veth_lpar_connection,
+			     statemachine_wq.work);
 	int rlp = cnx->remote_lp;
 	int rc;
 
@@ -827,7 +829,7 @@ static int veth_init_connection(u8 rlp)
 
 	cnx->remote_lp = rlp;
 	spin_lock_init(&cnx->lock);
-	INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
+	INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
 
 	init_timer(&cnx->ack_timer);
 	cnx->ack_timer.function = veth_timed_ack;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7b127212e62b..e628126c9c49 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -106,7 +106,7 @@ static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
 void ixgb_set_ethtool_ops(struct net_device *netdev);
 static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
 static void ixgb_vlan_rx_register(struct net_device *netdev,
 				struct vlan_group *grp);
 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -489,8 +489,7 @@ ixgb_probe(struct pci_dev *pdev,
 	adapter->watchdog_timer.function = &ixgb_watchdog;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
 
-	INIT_WORK(&adapter->tx_timeout_task,
-		(void (*)(void *))ixgb_tx_timeout_task, netdev);
+	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
 
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev)))
@@ -1493,9 +1492,10 @@ ixgb_tx_timeout(struct net_device *netdev)
 }
 
 static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
 {
-	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	struct ixgb_adapter *adapter =
+		container_of(work, struct ixgb_adapter, tx_timeout_task);
 
 	adapter->tx_timeout_count++;
 	ixgb_down(adapter, TRUE);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 36350e6db1c1..38df42802386 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2615,9 +2615,10 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
  * This watchdog is used to check whether the board has suffered
  * from a parity error and needs to be recovered.
  */
-static void myri10ge_watchdog(void *arg)
+static void myri10ge_watchdog(struct work_struct *work)
 {
-	struct myri10ge_priv *mgp = arg;
+	struct myri10ge_priv *mgp =
+		container_of(work, struct myri10ge_priv, watchdog_work);
 	u32 reboot;
 	int status;
 	u16 cmd, vendor;
@@ -2887,7 +2888,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		    (unsigned long)mgp);
 
 	SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
-	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
 	status = register_netdev(netdev);
 	if (status != 0) {
 		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d925053fe597..9c588af8ab74 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -714,6 +714,7 @@ struct netxen_adapter {
 	spinlock_t lock;
 	struct work_struct watchdog_task;
 	struct work_struct tx_timeout_task;
+	struct net_device *netdev;
 	struct timer_list watchdog_timer;
 
 	u32 curr_window;
@@ -921,7 +922,7 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
 			struct netxen_port *port);
 int netxen_nic_rx_has_work(struct netxen_adapter *adapter);
 int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
-void netxen_watchdog_task(unsigned long v);
+void netxen_watchdog_task(struct work_struct *work);
 void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
 			    u32 ringid);
 void netxen_process_cmd_ring(unsigned long data);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 0dca029bc3e5..eae18236aefa 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -710,12 +710,13 @@ static inline int netxen_nic_check_temp(struct netxen_adapter *adapter)
 	return rv;
 }
 
-void netxen_watchdog_task(unsigned long v)
+void netxen_watchdog_task(struct work_struct *work)
 {
 	int port_num;
 	struct netxen_port *port;
 	struct net_device *netdev;
-	struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+	struct netxen_adapter *adapter =
+		container_of(work, struct netxen_adapter, watchdog_task);
 
 	if (netxen_nic_check_temp(adapter))
 		return;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 1cb662d5bd76..df0bb36a1cfb 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -64,7 +64,7 @@ static int netxen_nic_open(struct net_device *netdev);
 static int netxen_nic_close(struct net_device *netdev);
 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
 static void netxen_tx_timeout(struct net_device *netdev);
-static void netxen_tx_timeout_task(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
 static void netxen_watchdog(unsigned long);
 static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
 static int netxen_nic_ioctl(struct net_device *netdev,
@@ -274,8 +274,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
274 adapter->ahw.xg_linkup = 0; 274 adapter->ahw.xg_linkup = 0;
275 adapter->watchdog_timer.function = &netxen_watchdog; 275 adapter->watchdog_timer.function = &netxen_watchdog;
276 adapter->watchdog_timer.data = (unsigned long)adapter; 276 adapter->watchdog_timer.data = (unsigned long)adapter;
277 INIT_WORK(&adapter->watchdog_task, 277 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
278 (void (*)(void *))netxen_watchdog_task, adapter);
279 adapter->ahw.pdev = pdev; 278 adapter->ahw.pdev = pdev;
280 adapter->proc_cmd_buf_counter = 0; 279 adapter->proc_cmd_buf_counter = 0;
281 pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id); 280 pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);
@@ -379,8 +378,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
379 dev_addr); 378 dev_addr);
380 } 379 }
381 } 380 }
382 INIT_WORK(&adapter->tx_timeout_task, 381 adapter->netdev = netdev;
383 (void (*)(void *))netxen_tx_timeout_task, netdev); 382 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
384 netif_carrier_off(netdev); 383 netif_carrier_off(netdev);
385 netif_stop_queue(netdev); 384 netif_stop_queue(netdev);
386 385
@@ -938,18 +937,20 @@ static void netxen_tx_timeout(struct net_device *netdev)
938 schedule_work(&adapter->tx_timeout_task); 937 schedule_work(&adapter->tx_timeout_task);
939} 938}
940 939
941static void netxen_tx_timeout_task(struct net_device *netdev) 940static void netxen_tx_timeout_task(struct work_struct *work)
942{ 941{
943 struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); 942 struct netxen_adapter *adapter =
943 container_of(work, struct netxen_adapter, tx_timeout_task);
944 struct net_device *netdev = adapter->netdev;
944 unsigned long flags; 945 unsigned long flags;
945 946
946 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", 947 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
947 netxen_nic_driver_name, netdev->name); 948 netxen_nic_driver_name, netdev->name);
948 949
949 spin_lock_irqsave(&port->adapter->lock, flags); 950 spin_lock_irqsave(&adapter->lock, flags);
950 netxen_nic_close(netdev); 951 netxen_nic_close(netdev);
951 netxen_nic_open(netdev); 952 netxen_nic_open(netdev);
952 spin_unlock_irqrestore(&port->adapter->lock, flags); 953 spin_unlock_irqrestore(&adapter->lock, flags);
953 netdev->trans_start = jiffies; 954 netdev->trans_start = jiffies;
954 netif_wake_queue(netdev); 955 netif_wake_queue(netdev);
955} 956}
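The netxen hunks above show the shape shared by every driver touched in this series: the work handler now receives the work_struct pointer itself, recovers its private structure with container_of(), and INIT_WORK() loses its data argument, so any extra context (here the net_device) must be reachable from the containing structure via a back-pointer. A minimal sketch of the pattern, using a hypothetical foo_adapter purely for illustration:

	#include <linux/workqueue.h>
	#include <linux/netdevice.h>

	struct foo_adapter {
		struct net_device *netdev;	/* back-pointer the handler needs */
		struct work_struct tx_timeout_task;
	};

	/* old style (pre-conversion):
	 *	static void foo_tx_timeout_task(void *data);
	 *	INIT_WORK(&adapter->tx_timeout_task, foo_tx_timeout_task, adapter);
	 */

	/* new style: the handler walks back from the work item to its owner */
	static void foo_tx_timeout_task(struct work_struct *work)
	{
		struct foo_adapter *adapter =
			container_of(work, struct foo_adapter, tx_timeout_task);
		struct net_device *netdev = adapter->netdev;

		netif_wake_queue(netdev);
	}

	static void foo_probe_setup(struct foo_adapter *adapter,
				    struct net_device *netdev)
	{
		adapter->netdev = netdev;
		INIT_WORK(&adapter->tx_timeout_task, foo_tx_timeout_task);
	}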
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index b0127c71a5b6..312e0e331712 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -427,6 +427,7 @@ struct ns83820 {
427 u8 __iomem *base; 427 u8 __iomem *base;
428 428
429 struct pci_dev *pci_dev; 429 struct pci_dev *pci_dev;
430 struct net_device *ndev;
430 431
431#ifdef NS83820_VLAN_ACCEL_SUPPORT 432#ifdef NS83820_VLAN_ACCEL_SUPPORT
432 struct vlan_group *vlgrp; 433 struct vlan_group *vlgrp;
@@ -631,10 +632,10 @@ static void fastcall rx_refill_atomic(struct net_device *ndev)
631} 632}
632 633
633/* REFILL */ 634/* REFILL */
634static inline void queue_refill(void *_dev) 635static inline void queue_refill(struct work_struct *work)
635{ 636{
636 struct net_device *ndev = _dev; 637 struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
637 struct ns83820 *dev = PRIV(ndev); 638 struct net_device *ndev = dev->ndev;
638 639
639 rx_refill(ndev, GFP_KERNEL); 640 rx_refill(ndev, GFP_KERNEL);
640 if (dev->rx_info.up) 641 if (dev->rx_info.up)
@@ -1841,6 +1842,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1841 1842
1842 ndev = alloc_etherdev(sizeof(struct ns83820)); 1843 ndev = alloc_etherdev(sizeof(struct ns83820));
1843 dev = PRIV(ndev); 1844 dev = PRIV(ndev);
1845 dev->ndev = ndev;
1844 err = -ENOMEM; 1846 err = -ENOMEM;
1845 if (!dev) 1847 if (!dev)
1846 goto out; 1848 goto out;
@@ -1853,7 +1855,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1853 SET_MODULE_OWNER(ndev); 1855 SET_MODULE_OWNER(ndev);
1854 SET_NETDEV_DEV(ndev, &pci_dev->dev); 1856 SET_NETDEV_DEV(ndev, &pci_dev->dev);
1855 1857
1856 INIT_WORK(&dev->tq_refill, queue_refill, ndev); 1858 INIT_WORK(&dev->tq_refill, queue_refill);
1857 tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); 1859 tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
1858 1860
1859 err = pci_enable_device(pci_dev); 1861 err = pci_enable_device(pci_dev);
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 046009928526..794cc61819dd 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -338,7 +338,6 @@ static int tc574_config(struct pcmcia_device *link)
338 struct net_device *dev = link->priv; 338 struct net_device *dev = link->priv;
339 struct el3_private *lp = netdev_priv(dev); 339 struct el3_private *lp = netdev_priv(dev);
340 tuple_t tuple; 340 tuple_t tuple;
341 cisparse_t parse;
342 unsigned short buf[32]; 341 unsigned short buf[32];
343 int last_fn, last_ret, i, j; 342 int last_fn, last_ret, i, j;
344 kio_addr_t ioaddr; 343 kio_addr_t ioaddr;
@@ -350,17 +349,6 @@ static int tc574_config(struct pcmcia_device *link)
350 349
351 DEBUG(0, "3c574_config(0x%p)\n", link); 350 DEBUG(0, "3c574_config(0x%p)\n", link);
352 351
353 tuple.Attributes = 0;
354 tuple.DesiredTuple = CISTPL_CONFIG;
355 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
356 tuple.TupleData = (cisdata_t *)buf;
357 tuple.TupleDataMax = 64;
358 tuple.TupleOffset = 0;
359 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
360 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
361 link->conf.ConfigBase = parse.config.base;
362 link->conf.Present = parse.config.rmask[0];
363
364 link->io.IOAddrLines = 16; 352 link->io.IOAddrLines = 16;
365 for (i = j = 0; j < 0x400; j += 0x20) { 353 for (i = j = 0; j < 0x400; j += 0x20) {
366 link->io.BasePort1 = j ^ 0x300; 354 link->io.BasePort1 = j ^ 0x300;
@@ -382,6 +370,10 @@ static int tc574_config(struct pcmcia_device *link)
382 /* The 3c574 normally uses an EEPROM for configuration info, including 370 /* The 3c574 normally uses an EEPROM for configuration info, including
383 the hardware address. The future products may include a modem chip 371 the hardware address. The future products may include a modem chip
384 and put the address in the CIS. */ 372 and put the address in the CIS. */
373 tuple.Attributes = 0;
374 tuple.TupleData = (cisdata_t *)buf;
375 tuple.TupleDataMax = 64;
376 tuple.TupleOffset = 0;
385 tuple.DesiredTuple = 0x88; 377 tuple.DesiredTuple = 0x88;
386 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) { 378 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
387 pcmcia_get_tuple_data(link, &tuple); 379 pcmcia_get_tuple_data(link, &tuple);
@@ -397,12 +389,9 @@ static int tc574_config(struct pcmcia_device *link)
397 goto failed; 389 goto failed;
398 } 390 }
399 } 391 }
400 tuple.DesiredTuple = CISTPL_VERS_1; 392 if (link->prod_id[1])
401 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS && 393 cardname = link->prod_id[1];
402 pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS && 394 else
403 pcmcia_parse_tuple(link, &tuple, &parse) == CS_SUCCESS) {
404 cardname = parse.version_1.str + parse.version_1.ofs[1];
405 } else
406 cardname = "3Com 3c574"; 395 cardname = "3Com 3c574";
407 396
408 { 397 {
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 231fa2c9ec6c..1e73ff7d5d8e 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -253,7 +253,6 @@ static int tc589_config(struct pcmcia_device *link)
253 struct net_device *dev = link->priv; 253 struct net_device *dev = link->priv;
254 struct el3_private *lp = netdev_priv(dev); 254 struct el3_private *lp = netdev_priv(dev);
255 tuple_t tuple; 255 tuple_t tuple;
256 cisparse_t parse;
257 u16 buf[32], *phys_addr; 256 u16 buf[32], *phys_addr;
258 int last_fn, last_ret, i, j, multi = 0, fifo; 257 int last_fn, last_ret, i, j, multi = 0, fifo;
259 kio_addr_t ioaddr; 258 kio_addr_t ioaddr;
@@ -263,26 +262,16 @@ static int tc589_config(struct pcmcia_device *link)
263 262
264 phys_addr = (u16 *)dev->dev_addr; 263 phys_addr = (u16 *)dev->dev_addr;
265 tuple.Attributes = 0; 264 tuple.Attributes = 0;
266 tuple.DesiredTuple = CISTPL_CONFIG;
267 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
268 tuple.TupleData = (cisdata_t *)buf; 265 tuple.TupleData = (cisdata_t *)buf;
269 tuple.TupleDataMax = sizeof(buf); 266 tuple.TupleDataMax = sizeof(buf);
270 tuple.TupleOffset = 0; 267 tuple.TupleOffset = 0;
271 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
272 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
273 link->conf.ConfigBase = parse.config.base;
274 link->conf.Present = parse.config.rmask[0];
275
276 /* Is this a 3c562? */
277 tuple.DesiredTuple = CISTPL_MANFID;
278 tuple.Attributes = TUPLE_RETURN_COMMON; 268 tuple.Attributes = TUPLE_RETURN_COMMON;
279 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) && 269
280 (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) { 270 /* Is this a 3c562? */
281 if (le16_to_cpu(buf[0]) != MANFID_3COM) 271 if (link->manf_id != MANFID_3COM)
282 printk(KERN_INFO "3c589_cs: hmmm, is this really a " 272 printk(KERN_INFO "3c589_cs: hmmm, is this really a "
283 "3Com card??\n"); 273 "3Com card??\n");
284 multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562); 274 multi = (link->card_id == PRODID_3COM_3C562);
285 }
286 275
287 /* For the 3c562, the base address must be xx00-xx7f */ 276 /* For the 3c562, the base address must be xx00-xx7f */
288 link->io.IOAddrLines = 16; 277 link->io.IOAddrLines = 16;
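The two 3Com drivers above also drop their hand-rolled CIS tuple walks: CISTPL_CONFIG is parsed by the PCMCIA core before the driver's config() routine runs (populating link->conf.ConfigBase and Present), and the manufacturer and product identifiers are available pre-parsed as link->manf_id, link->card_id and link->prod_id[]. A rough sketch of the simplified probe logic, assuming the usual 2.6.19-era PCMCIA headers and a placeholder foo_cs driver:

	#include <linux/kernel.h>
	#include <pcmcia/cs_types.h>
	#include <pcmcia/cs.h>
	#include <pcmcia/cistpl.h>
	#include <pcmcia/ciscode.h>
	#include <pcmcia/ds.h>

	static int foo_config(struct pcmcia_device *link)
	{
		const char *cardname;

		/* no CISTPL_CONFIG walk: link->conf is already populated */

		if (link->manf_id != MANFID_3COM)	/* pre-parsed CISTPL_MANFID */
			printk(KERN_INFO "foo_cs: is this really a 3Com card?\n");

		/* pre-parsed CISTPL_VERS_1 product strings */
		cardname = link->prod_id[1] ? link->prod_id[1] : "3Com 3c574";
		printk(KERN_INFO "foo_cs: found %s\n", cardname);

		return 0;
	}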
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 5ddd5742f779..6139048f8117 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -299,11 +299,7 @@ static int axnet_config(struct pcmcia_device *link)
299 tuple.TupleData = (cisdata_t *)buf; 299 tuple.TupleData = (cisdata_t *)buf;
300 tuple.TupleDataMax = sizeof(buf); 300 tuple.TupleDataMax = sizeof(buf);
301 tuple.TupleOffset = 0; 301 tuple.TupleOffset = 0;
302 tuple.DesiredTuple = CISTPL_CONFIG; 302
303 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
304 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
305 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
306 link->conf.ConfigBase = parse.config.base;
307 /* don't trust the CIS on this; Linksys got it wrong */ 303 /* don't trust the CIS on this; Linksys got it wrong */
308 link->conf.Present = 0x63; 304 link->conf.Present = 0x63;
309 305
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 48434d7924eb..91f65e91cd5f 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -249,12 +249,9 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
249static int com20020_config(struct pcmcia_device *link) 249static int com20020_config(struct pcmcia_device *link)
250{ 250{
251 struct arcnet_local *lp; 251 struct arcnet_local *lp;
252 tuple_t tuple;
253 cisparse_t parse;
254 com20020_dev_t *info; 252 com20020_dev_t *info;
255 struct net_device *dev; 253 struct net_device *dev;
256 int i, last_ret, last_fn; 254 int i, last_ret, last_fn;
257 u_char buf[64];
258 int ioaddr; 255 int ioaddr;
259 256
260 info = link->priv; 257 info = link->priv;
@@ -264,16 +261,6 @@ static int com20020_config(struct pcmcia_device *link)
264 261
265 DEBUG(0, "com20020_config(0x%p)\n", link); 262 DEBUG(0, "com20020_config(0x%p)\n", link);
266 263
267 tuple.Attributes = 0;
268 tuple.TupleData = buf;
269 tuple.TupleDataMax = 64;
270 tuple.TupleOffset = 0;
271 tuple.DesiredTuple = CISTPL_CONFIG;
272 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
273 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
274 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
275 link->conf.ConfigBase = parse.config.base;
276
277 DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1); 264 DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1);
278 i = !CS_SUCCESS; 265 i = !CS_SUCCESS;
279 if (!link->io.BasePort1) 266 if (!link->io.BasePort1)
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 65f6fdf43725..0d7de617e535 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -342,7 +342,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
342 tuple_t tuple; 342 tuple_t tuple;
343 cisparse_t parse; 343 cisparse_t parse;
344 u_short buf[32]; 344 u_short buf[32];
345 int i, last_fn, last_ret, ret; 345 int i, last_fn = 0, last_ret = 0, ret;
346 kio_addr_t ioaddr; 346 kio_addr_t ioaddr;
347 cardtype_t cardtype; 347 cardtype_t cardtype;
348 char *card_name = "unknown"; 348 char *card_name = "unknown";
@@ -350,21 +350,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
350 350
351 DEBUG(0, "fmvj18x_config(0x%p)\n", link); 351 DEBUG(0, "fmvj18x_config(0x%p)\n", link);
352 352
353 /*
354 This reads the card's CONFIG tuple to find its configuration
355 registers.
356 */
357 tuple.DesiredTuple = CISTPL_CONFIG;
358 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
359 tuple.TupleData = (u_char *)buf; 353 tuple.TupleData = (u_char *)buf;
360 tuple.TupleDataMax = 64; 354 tuple.TupleDataMax = 64;
361 tuple.TupleOffset = 0; 355 tuple.TupleOffset = 0;
362 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
363 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
364
365 link->conf.ConfigBase = parse.config.base;
366 link->conf.Present = parse.config.rmask[0];
367
368 tuple.DesiredTuple = CISTPL_FUNCE; 356 tuple.DesiredTuple = CISTPL_FUNCE;
369 tuple.TupleOffset = 0; 357 tuple.TupleOffset = 0;
370 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) { 358 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
@@ -374,17 +362,12 @@ static int fmvj18x_config(struct pcmcia_device *link)
374 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 362 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
375 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse)); 363 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
376 link->conf.ConfigIndex = parse.cftable_entry.index; 364 link->conf.ConfigIndex = parse.cftable_entry.index;
377 tuple.DesiredTuple = CISTPL_MANFID; 365 switch (link->manf_id) {
378 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
379 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
380 else
381 buf[0] = 0xffff;
382 switch (le16_to_cpu(buf[0])) {
383 case MANFID_TDK: 366 case MANFID_TDK:
384 cardtype = TDK; 367 cardtype = TDK;
385 if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410 368 if (link->card_id == PRODID_TDK_GN3410
386 || le16_to_cpu(buf[1]) == PRODID_TDK_NP9610 369 || link->card_id == PRODID_TDK_NP9610
387 || le16_to_cpu(buf[1]) == PRODID_TDK_MN3200) { 370 || link->card_id == PRODID_TDK_MN3200) {
388 /* MultiFunction Card */ 371 /* MultiFunction Card */
389 link->conf.ConfigBase = 0x800; 372 link->conf.ConfigBase = 0x800;
390 link->conf.ConfigIndex = 0x47; 373 link->conf.ConfigIndex = 0x47;
@@ -395,11 +378,11 @@ static int fmvj18x_config(struct pcmcia_device *link)
395 cardtype = CONTEC; 378 cardtype = CONTEC;
396 break; 379 break;
397 case MANFID_FUJITSU: 380 case MANFID_FUJITSU:
398 if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10302) 381 if (link->card_id == PRODID_FUJITSU_MBH10302)
399 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), 382 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
400 but these are MBH10304 based card. */ 383 but these are MBH10304 based card. */
401 cardtype = MBH10304; 384 cardtype = MBH10304;
402 else if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304) 385 else if (link->card_id == PRODID_FUJITSU_MBH10304)
403 cardtype = MBH10304; 386 cardtype = MBH10304;
404 else 387 else
405 cardtype = LA501; 388 cardtype = LA501;
@@ -409,14 +392,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
409 } 392 }
410 } else { 393 } else {
411 /* old type card */ 394 /* old type card */
412 tuple.DesiredTuple = CISTPL_MANFID; 395 switch (link->manf_id) {
413 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
414 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
415 else
416 buf[0] = 0xffff;
417 switch (le16_to_cpu(buf[0])) {
418 case MANFID_FUJITSU: 396 case MANFID_FUJITSU:
419 if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304) { 397 if (link->card_id == PRODID_FUJITSU_MBH10304) {
420 cardtype = XXX10304; /* MBH10304 with buggy CIS */ 398 cardtype = XXX10304; /* MBH10304 with buggy CIS */
421 link->conf.ConfigIndex = 0x20; 399 link->conf.ConfigIndex = 0x20;
422 } else { 400 } else {
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index bc0ca41a0542..a956a51d284f 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -222,24 +222,12 @@ static int ibmtr_config(struct pcmcia_device *link)
222 ibmtr_dev_t *info = link->priv; 222 ibmtr_dev_t *info = link->priv;
223 struct net_device *dev = info->dev; 223 struct net_device *dev = info->dev;
224 struct tok_info *ti = netdev_priv(dev); 224 struct tok_info *ti = netdev_priv(dev);
225 tuple_t tuple;
226 cisparse_t parse;
227 win_req_t req; 225 win_req_t req;
228 memreq_t mem; 226 memreq_t mem;
229 int i, last_ret, last_fn; 227 int i, last_ret, last_fn;
230 u_char buf[64];
231 228
232 DEBUG(0, "ibmtr_config(0x%p)\n", link); 229 DEBUG(0, "ibmtr_config(0x%p)\n", link);
233 230
234 tuple.Attributes = 0;
235 tuple.TupleData = buf;
236 tuple.TupleDataMax = 64;
237 tuple.TupleOffset = 0;
238 tuple.DesiredTuple = CISTPL_CONFIG;
239 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
240 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
241 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
242 link->conf.ConfigBase = parse.config.base;
243 link->conf.ConfigIndex = 0x61; 231 link->conf.ConfigIndex = 0x61;
244 232
245 /* Determine if this is PRIMARY or ALTERNATE. */ 233 /* Determine if this is PRIMARY or ALTERNATE. */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index e77110e4c288..3b707747a811 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -656,23 +656,12 @@ static int nmclan_config(struct pcmcia_device *link)
656 struct net_device *dev = link->priv; 656 struct net_device *dev = link->priv;
657 mace_private *lp = netdev_priv(dev); 657 mace_private *lp = netdev_priv(dev);
658 tuple_t tuple; 658 tuple_t tuple;
659 cisparse_t parse;
660 u_char buf[64]; 659 u_char buf[64];
661 int i, last_ret, last_fn; 660 int i, last_ret, last_fn;
662 kio_addr_t ioaddr; 661 kio_addr_t ioaddr;
663 662
664 DEBUG(0, "nmclan_config(0x%p)\n", link); 663 DEBUG(0, "nmclan_config(0x%p)\n", link);
665 664
666 tuple.Attributes = 0;
667 tuple.TupleData = buf;
668 tuple.TupleDataMax = 64;
669 tuple.TupleOffset = 0;
670 tuple.DesiredTuple = CISTPL_CONFIG;
671 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
672 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
673 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
674 link->conf.ConfigBase = parse.config.base;
675
676 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); 665 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
677 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 666 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
678 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 667 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
@@ -686,6 +675,7 @@ static int nmclan_config(struct pcmcia_device *link)
686 tuple.TupleData = buf; 675 tuple.TupleData = buf;
687 tuple.TupleDataMax = 64; 676 tuple.TupleDataMax = 64;
688 tuple.TupleOffset = 0; 677 tuple.TupleOffset = 0;
678 tuple.Attributes = 0;
689 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 679 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
690 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 680 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
691 memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN); 681 memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index c51cc5d8789a..2b1238e2dbdb 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -519,31 +519,15 @@ static int pcnet_config(struct pcmcia_device *link)
519 tuple_t tuple; 519 tuple_t tuple;
520 cisparse_t parse; 520 cisparse_t parse;
521 int i, last_ret, last_fn, start_pg, stop_pg, cm_offset; 521 int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
522 int manfid = 0, prodid = 0, has_shmem = 0; 522 int has_shmem = 0;
523 u_short buf[64]; 523 u_short buf[64];
524 hw_info_t *hw_info; 524 hw_info_t *hw_info;
525 525
526 DEBUG(0, "pcnet_config(0x%p)\n", link); 526 DEBUG(0, "pcnet_config(0x%p)\n", link);
527 527
528 tuple.Attributes = 0;
529 tuple.TupleData = (cisdata_t *)buf; 528 tuple.TupleData = (cisdata_t *)buf;
530 tuple.TupleDataMax = sizeof(buf); 529 tuple.TupleDataMax = sizeof(buf);
531 tuple.TupleOffset = 0; 530 tuple.TupleOffset = 0;
532 tuple.DesiredTuple = CISTPL_CONFIG;
533 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
534 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
535 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
536 link->conf.ConfigBase = parse.config.base;
537 link->conf.Present = parse.config.rmask[0];
538
539 tuple.DesiredTuple = CISTPL_MANFID;
540 tuple.Attributes = TUPLE_RETURN_COMMON;
541 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
542 (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
543 manfid = le16_to_cpu(buf[0]);
544 prodid = le16_to_cpu(buf[1]);
545 }
546
547 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 531 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
548 tuple.Attributes = 0; 532 tuple.Attributes = 0;
549 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 533 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
@@ -589,8 +573,8 @@ static int pcnet_config(struct pcmcia_device *link)
589 link->conf.Attributes |= CONF_ENABLE_SPKR; 573 link->conf.Attributes |= CONF_ENABLE_SPKR;
590 link->conf.Status = CCSR_AUDIO_ENA; 574 link->conf.Status = CCSR_AUDIO_ENA;
591 } 575 }
592 if ((manfid == MANFID_IBM) && 576 if ((link->manf_id == MANFID_IBM) &&
593 (prodid == PRODID_IBM_HOME_AND_AWAY)) 577 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
594 link->conf.ConfigIndex |= 0x10; 578 link->conf.ConfigIndex |= 0x10;
595 579
596 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 580 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
@@ -624,10 +608,10 @@ static int pcnet_config(struct pcmcia_device *link)
624 info->flags = hw_info->flags; 608 info->flags = hw_info->flags;
625 /* Check for user overrides */ 609 /* Check for user overrides */
626 info->flags |= (delay_output) ? DELAY_OUTPUT : 0; 610 info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
627 if ((manfid == MANFID_SOCKET) && 611 if ((link->manf_id == MANFID_SOCKET) &&
628 ((prodid == PRODID_SOCKET_LPE) || 612 ((link->card_id == PRODID_SOCKET_LPE) ||
629 (prodid == PRODID_SOCKET_LPE_CF) || 613 (link->card_id == PRODID_SOCKET_LPE_CF) ||
630 (prodid == PRODID_SOCKET_EIO))) 614 (link->card_id == PRODID_SOCKET_EIO)))
631 info->flags &= ~USE_BIG_BUF; 615 info->flags &= ~USE_BIG_BUF;
632 if (!use_big_buf) 616 if (!use_big_buf)
633 info->flags &= ~USE_BIG_BUF; 617 info->flags &= ~USE_BIG_BUF;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 20fcc3576202..530df8883fe5 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -560,16 +560,8 @@ static int mhz_setup(struct pcmcia_device *link)
560 560
561 /* Read the station address from the CIS. It is stored as the last 561 /* Read the station address from the CIS. It is stored as the last
562 (fourth) string in the Version 1 Version/ID tuple. */ 562 (fourth) string in the Version 1 Version/ID tuple. */
563 tuple->DesiredTuple = CISTPL_VERS_1; 563 if (link->prod_id[3]) {
564 if (first_tuple(link, tuple, parse) != CS_SUCCESS) { 564 station_addr = link->prod_id[3];
565 rc = -1;
566 goto free_cfg_mem;
567 }
568 /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
569 if (next_tuple(link, tuple, parse) != CS_SUCCESS)
570 first_tuple(link, tuple, parse);
571 if (parse->version_1.ns > 3) {
572 station_addr = parse->version_1.str + parse->version_1.ofs[3];
573 if (cvt_ascii_address(dev, station_addr) == 0) { 565 if (cvt_ascii_address(dev, station_addr) == 0) {
574 rc = 0; 566 rc = 0;
575 goto free_cfg_mem; 567 goto free_cfg_mem;
@@ -744,15 +736,12 @@ static int smc_setup(struct pcmcia_device *link)
744 } 736 }
745 } 737 }
746 /* Try the third string in the Version 1 Version/ID tuple. */ 738 /* Try the third string in the Version 1 Version/ID tuple. */
747 tuple->DesiredTuple = CISTPL_VERS_1; 739 if (link->prod_id[2]) {
748 if (first_tuple(link, tuple, parse) != CS_SUCCESS) { 740 station_addr = link->prod_id[2];
749 rc = -1; 741 if (cvt_ascii_address(dev, station_addr) == 0) {
750 goto free_cfg_mem; 742 rc = 0;
751 } 743 goto free_cfg_mem;
752 station_addr = parse->version_1.str + parse->version_1.ofs[2]; 744 }
753 if (cvt_ascii_address(dev, station_addr) == 0) {
754 rc = 0;
755 goto free_cfg_mem;
756 } 745 }
757 746
758 rc = -1; 747 rc = -1;
@@ -970,10 +959,6 @@ static int smc91c92_config(struct pcmcia_device *link)
970{ 959{
971 struct net_device *dev = link->priv; 960 struct net_device *dev = link->priv;
972 struct smc_private *smc = netdev_priv(dev); 961 struct smc_private *smc = netdev_priv(dev);
973 struct smc_cfg_mem *cfg_mem;
974 tuple_t *tuple;
975 cisparse_t *parse;
976 u_char *buf;
977 char *name; 962 char *name;
978 int i, j, rev; 963 int i, j, rev;
979 kio_addr_t ioaddr; 964 kio_addr_t ioaddr;
@@ -981,30 +966,8 @@ static int smc91c92_config(struct pcmcia_device *link)
981 966
982 DEBUG(0, "smc91c92_config(0x%p)\n", link); 967 DEBUG(0, "smc91c92_config(0x%p)\n", link);
983 968
984 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); 969 smc->manfid = link->manf_id;
985 if (!cfg_mem) 970 smc->cardid = link->card_id;
986 goto config_failed;
987
988 tuple = &cfg_mem->tuple;
989 parse = &cfg_mem->parse;
990 buf = cfg_mem->buf;
991
992 tuple->Attributes = tuple->TupleOffset = 0;
993 tuple->TupleData = (cisdata_t *)buf;
994 tuple->TupleDataMax = 64;
995
996 tuple->DesiredTuple = CISTPL_CONFIG;
997 i = first_tuple(link, tuple, parse);
998 CS_EXIT_TEST(i, ParseTuple, config_failed);
999 link->conf.ConfigBase = parse->config.base;
1000 link->conf.Present = parse->config.rmask[0];
1001
1002 tuple->DesiredTuple = CISTPL_MANFID;
1003 tuple->Attributes = TUPLE_RETURN_COMMON;
1004 if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
1005 smc->manfid = parse->manfid.manf;
1006 smc->cardid = parse->manfid.card;
1007 }
1008 971
1009 if ((smc->manfid == MANFID_OSITECH) && 972 if ((smc->manfid == MANFID_OSITECH) &&
1010 (smc->cardid != PRODID_OSITECH_SEVEN)) { 973 (smc->cardid != PRODID_OSITECH_SEVEN)) {
@@ -1134,14 +1097,12 @@ static int smc91c92_config(struct pcmcia_device *link)
1134 printk(KERN_NOTICE " No MII transceivers found!\n"); 1097 printk(KERN_NOTICE " No MII transceivers found!\n");
1135 } 1098 }
1136 } 1099 }
1137 kfree(cfg_mem);
1138 return 0; 1100 return 0;
1139 1101
1140config_undo: 1102config_undo:
1141 unregister_netdev(dev); 1103 unregister_netdev(dev);
1142config_failed: /* CS_EXIT_TEST() calls jump to here... */ 1104config_failed: /* CS_EXIT_TEST() calls jump to here... */
1143 smc91c92_release(link); 1105 smc91c92_release(link);
1144 kfree(cfg_mem);
1145 return -ENODEV; 1106 return -ENODEV;
1146} /* smc91c92_config */ 1107} /* smc91c92_config */
1147 1108
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index f3914f58d67f..8478dca3d8d1 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -332,6 +332,7 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
332 */ 332 */
333 333
334typedef struct local_info_t { 334typedef struct local_info_t {
335 struct net_device *dev;
335 struct pcmcia_device *p_dev; 336 struct pcmcia_device *p_dev;
336 dev_node_t node; 337 dev_node_t node;
337 struct net_device_stats stats; 338 struct net_device_stats stats;
@@ -353,7 +354,7 @@ typedef struct local_info_t {
353 */ 354 */
354static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); 355static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
355static void do_tx_timeout(struct net_device *dev); 356static void do_tx_timeout(struct net_device *dev);
356static void xirc2ps_tx_timeout_task(void *data); 357static void xirc2ps_tx_timeout_task(struct work_struct *work);
357static struct net_device_stats *do_get_stats(struct net_device *dev); 358static struct net_device_stats *do_get_stats(struct net_device *dev);
358static void set_addresses(struct net_device *dev); 359static void set_addresses(struct net_device *dev);
359static void set_multicast_list(struct net_device *dev); 360static void set_multicast_list(struct net_device *dev);
@@ -567,6 +568,7 @@ xirc2ps_probe(struct pcmcia_device *link)
567 if (!dev) 568 if (!dev)
568 return -ENOMEM; 569 return -ENOMEM;
569 local = netdev_priv(dev); 570 local = netdev_priv(dev);
571 local->dev = dev;
570 local->p_dev = link; 572 local->p_dev = link;
571 link->priv = dev; 573 link->priv = dev;
572 574
@@ -591,7 +593,7 @@ xirc2ps_probe(struct pcmcia_device *link)
591#ifdef HAVE_TX_TIMEOUT 593#ifdef HAVE_TX_TIMEOUT
592 dev->tx_timeout = do_tx_timeout; 594 dev->tx_timeout = do_tx_timeout;
593 dev->watchdog_timeo = TX_TIMEOUT; 595 dev->watchdog_timeo = TX_TIMEOUT;
594 INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); 596 INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
595#endif 597#endif
596 598
597 return xirc2ps_config(link); 599 return xirc2ps_config(link);
@@ -707,22 +709,11 @@ set_card_type(struct pcmcia_device *link, const void *s)
707 * Returns: true if this is a CE2 709 * Returns: true if this is a CE2
708 */ 710 */
709static int 711static int
710has_ce2_string(struct pcmcia_device * link) 712has_ce2_string(struct pcmcia_device * p_dev)
711{ 713{
712 tuple_t tuple; 714 if (p_dev->prod_id[2] && strstr(p_dev->prod_id[2], "CE2"))
713 cisparse_t parse; 715 return 1;
714 u_char buf[256]; 716 return 0;
715
716 tuple.Attributes = 0;
717 tuple.TupleData = buf;
718 tuple.TupleDataMax = 254;
719 tuple.TupleOffset = 0;
720 tuple.DesiredTuple = CISTPL_VERS_1;
721 if (!first_tuple(link, &tuple, &parse) && parse.version_1.ns > 2) {
722 if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2"))
723 return 1;
724 }
725 return 0;
726} 717}
727 718
728/**************** 719/****************
@@ -792,13 +783,6 @@ xirc2ps_config(struct pcmcia_device * link)
792 goto failure; 783 goto failure;
793 } 784 }
794 785
795 /* get configuration stuff */
796 tuple.DesiredTuple = CISTPL_CONFIG;
797 if ((err=first_tuple(link, &tuple, &parse)))
798 goto cis_error;
799 link->conf.ConfigBase = parse.config.base;
800 link->conf.Present = parse.config.rmask[0];
801
802 /* get the ethernet address from the CIS */ 786 /* get the ethernet address from the CIS */
803 tuple.DesiredTuple = CISTPL_FUNCE; 787 tuple.DesiredTuple = CISTPL_FUNCE;
804 for (err = first_tuple(link, &tuple, &parse); !err; 788 for (err = first_tuple(link, &tuple, &parse); !err;
@@ -1062,8 +1046,6 @@ xirc2ps_config(struct pcmcia_device * link)
1062 xirc2ps_release(link); 1046 xirc2ps_release(link);
1063 return -ENODEV; 1047 return -ENODEV;
1064 1048
1065 cis_error:
1066 printk(KNOT_XIRC "unable to parse CIS\n");
1067 failure: 1049 failure:
1068 return -ENODEV; 1050 return -ENODEV;
1069} /* xirc2ps_config */ 1051} /* xirc2ps_config */
@@ -1344,9 +1326,11 @@ xirc2ps_interrupt(int irq, void *dev_id)
1344/*====================================================================*/ 1326/*====================================================================*/
1345 1327
1346static void 1328static void
1347xirc2ps_tx_timeout_task(void *data) 1329xirc2ps_tx_timeout_task(struct work_struct *work)
1348{ 1330{
1349 struct net_device *dev = data; 1331 local_info_t *local =
1332 container_of(work, local_info_t, tx_timeout_task);
1333 struct net_device *dev = local->dev;
1350 /* reset the card */ 1334 /* reset the card */
1351 do_reset(dev,1); 1335 do_reset(dev,1);
1352 dev->trans_start = jiffies; 1336 dev->trans_start = jiffies;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 88237bdb5255..4044bb1ada86 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -397,7 +397,7 @@ out_unlock:
397EXPORT_SYMBOL(phy_start_aneg); 397EXPORT_SYMBOL(phy_start_aneg);
398 398
399 399
400static void phy_change(void *data); 400static void phy_change(struct work_struct *work);
401static void phy_timer(unsigned long data); 401static void phy_timer(unsigned long data);
402 402
403/* phy_start_machine: 403/* phy_start_machine:
@@ -555,7 +555,7 @@ int phy_start_interrupts(struct phy_device *phydev)
555{ 555{
556 int err = 0; 556 int err = 0;
557 557
558 INIT_WORK(&phydev->phy_queue, phy_change, phydev); 558 INIT_WORK(&phydev->phy_queue, phy_change);
559 559
560 if (request_irq(phydev->irq, phy_interrupt, 560 if (request_irq(phydev->irq, phy_interrupt,
561 IRQF_SHARED, 561 IRQF_SHARED,
@@ -598,10 +598,11 @@ EXPORT_SYMBOL(phy_stop_interrupts);
598 598
599 599
600/* Scheduled by the phy_interrupt/timer to handle PHY changes */ 600/* Scheduled by the phy_interrupt/timer to handle PHY changes */
601static void phy_change(void *data) 601static void phy_change(struct work_struct *work)
602{ 602{
603 int err; 603 int err;
604 struct phy_device *phydev = data; 604 struct phy_device *phydev =
605 container_of(work, struct phy_device, phy_queue);
605 606
606 err = phy_disable_interrupts(phydev); 607 err = phy_disable_interrupts(phydev);
607 608
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 71afb274498f..6bb085f54437 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -138,9 +138,9 @@ static const unsigned int net_debug = NET_DEBUG;
138#define PLIP_NIBBLE_WAIT 3000 138#define PLIP_NIBBLE_WAIT 3000
139 139
140/* Bottom halves */ 140/* Bottom halves */
141static void plip_kick_bh(struct net_device *dev); 141static void plip_kick_bh(struct work_struct *work);
142static void plip_bh(struct net_device *dev); 142static void plip_bh(struct work_struct *work);
143static void plip_timer_bh(struct net_device *dev); 143static void plip_timer_bh(struct work_struct *work);
144 144
145/* Interrupt handler */ 145/* Interrupt handler */
146static void plip_interrupt(int irq, void *dev_id); 146static void plip_interrupt(int irq, void *dev_id);
@@ -207,9 +207,10 @@ struct plip_local {
207 207
208struct net_local { 208struct net_local {
209 struct net_device_stats enet_stats; 209 struct net_device_stats enet_stats;
210 struct net_device *dev;
210 struct work_struct immediate; 211 struct work_struct immediate;
211 struct work_struct deferred; 212 struct delayed_work deferred;
212 struct work_struct timer; 213 struct delayed_work timer;
213 struct plip_local snd_data; 214 struct plip_local snd_data;
214 struct plip_local rcv_data; 215 struct plip_local rcv_data;
215 struct pardevice *pardev; 216 struct pardevice *pardev;
@@ -306,11 +307,11 @@ plip_init_netdev(struct net_device *dev)
306 nl->nibble = PLIP_NIBBLE_WAIT; 307 nl->nibble = PLIP_NIBBLE_WAIT;
307 308
308 /* Initialize task queue structures */ 309 /* Initialize task queue structures */
309 INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev); 310 INIT_WORK(&nl->immediate, plip_bh);
310 INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev); 311 INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
311 312
312 if (dev->irq == -1) 313 if (dev->irq == -1)
313 INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev); 314 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
314 315
315 spin_lock_init(&nl->lock); 316 spin_lock_init(&nl->lock);
316} 317}
@@ -319,9 +320,10 @@ plip_init_netdev(struct net_device *dev)
319 This routine is kicked by do_timer(). 320 This routine is kicked by do_timer().
320 Request `plip_bh' to be invoked. */ 321 Request `plip_bh' to be invoked. */
321static void 322static void
322plip_kick_bh(struct net_device *dev) 323plip_kick_bh(struct work_struct *work)
323{ 324{
324 struct net_local *nl = netdev_priv(dev); 325 struct net_local *nl =
326 container_of(work, struct net_local, deferred.work);
325 327
326 if (nl->is_deferred) 328 if (nl->is_deferred)
327 schedule_work(&nl->immediate); 329 schedule_work(&nl->immediate);
@@ -362,9 +364,9 @@ static const plip_func connection_state_table[] =
362 364
363/* Bottom half handler of PLIP. */ 365/* Bottom half handler of PLIP. */
364static void 366static void
365plip_bh(struct net_device *dev) 367plip_bh(struct work_struct *work)
366{ 368{
367 struct net_local *nl = netdev_priv(dev); 369 struct net_local *nl = container_of(work, struct net_local, immediate);
368 struct plip_local *snd = &nl->snd_data; 370 struct plip_local *snd = &nl->snd_data;
369 struct plip_local *rcv = &nl->rcv_data; 371 struct plip_local *rcv = &nl->rcv_data;
370 plip_func f; 372 plip_func f;
@@ -372,20 +374,21 @@ plip_bh(struct net_device *dev)
372 374
373 nl->is_deferred = 0; 375 nl->is_deferred = 0;
374 f = connection_state_table[nl->connection]; 376 f = connection_state_table[nl->connection];
375 if ((r = (*f)(dev, nl, snd, rcv)) != OK 377 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
376 && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) { 378 && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
377 nl->is_deferred = 1; 379 nl->is_deferred = 1;
378 schedule_delayed_work(&nl->deferred, 1); 380 schedule_delayed_work(&nl->deferred, 1);
379 } 381 }
380} 382}
381 383
382static void 384static void
383plip_timer_bh(struct net_device *dev) 385plip_timer_bh(struct work_struct *work)
384{ 386{
385 struct net_local *nl = netdev_priv(dev); 387 struct net_local *nl =
388 container_of(work, struct net_local, timer.work);
386 389
387 if (!(atomic_read (&nl->kill_timer))) { 390 if (!(atomic_read (&nl->kill_timer))) {
388 plip_interrupt (-1, dev); 391 plip_interrupt (-1, nl->dev);
389 392
390 schedule_delayed_work(&nl->timer, 1); 393 schedule_delayed_work(&nl->timer, 1);
391 } 394 }
@@ -1284,6 +1287,7 @@ static void plip_attach (struct parport *port)
1284 } 1287 }
1285 1288
1286 nl = netdev_priv(dev); 1289 nl = netdev_priv(dev);
1290 nl->dev = dev;
1287 nl->pardev = parport_register_device(port, name, plip_preempt, 1291 nl->pardev = parport_register_device(port, name, plip_preempt,
1288 plip_wakeup, plip_interrupt, 1292 plip_wakeup, plip_interrupt,
1289 0, dev); 1293 0, dev);
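plip is one of the drivers whose work items are really delayed work (they re-arm themselves with a timeout), so the conversion also splits struct delayed_work out of struct work_struct: delayed items are declared as struct delayed_work, initialised with INIT_DELAYED_WORK(), and their handlers recover the owner through the embedded .work member. Callers that previously used queue_work() on such an item now go through queue_delayed_work() with a delay of 0 when the work should run immediately, as the qla3xxx hunks that follow show. A minimal sketch with hypothetical names:

	#include <linux/workqueue.h>

	struct foo_local {
		struct delayed_work timer;
		int keep_running;
	};

	static void foo_timer_bh(struct work_struct *work)
	{
		struct foo_local *nl =
			container_of(work, struct foo_local, timer.work);

		if (nl->keep_running)
			schedule_delayed_work(&nl->timer, 1);	/* re-arm in 1 jiffy */
	}

	static void foo_init(struct foo_local *nl)
	{
		INIT_DELAYED_WORK(&nl->timer, foo_timer_bh);
	}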
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index ec640f6229ae..d79d141a601d 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2008,7 +2008,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2008 "%s: Another function issued a reset to the " 2008 "%s: Another function issued a reset to the "
2009 "chip. ISR value = %x.\n", ndev->name, value); 2009 "chip. ISR value = %x.\n", ndev->name, value);
2010 } 2010 }
2011 queue_work(qdev->workqueue, &qdev->reset_work); 2011 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2012 spin_unlock(&qdev->adapter_lock); 2012 spin_unlock(&qdev->adapter_lock);
2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2014 ql_disable_interrupts(qdev); 2014 ql_disable_interrupts(qdev);
@@ -3182,11 +3182,13 @@ static void ql3xxx_tx_timeout(struct net_device *ndev)
3182 /* 3182 /*
3183 * Wake up the worker to process this event. 3183 * Wake up the worker to process this event.
3184 */ 3184 */
3185 queue_work(qdev->workqueue, &qdev->tx_timeout_work); 3185 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3186} 3186}
3187 3187
3188static void ql_reset_work(struct ql3_adapter *qdev) 3188static void ql_reset_work(struct work_struct *work)
3189{ 3189{
3190 struct ql3_adapter *qdev =
3191 container_of(work, struct ql3_adapter, reset_work.work);
3190 struct net_device *ndev = qdev->ndev; 3192 struct net_device *ndev = qdev->ndev;
3191 u32 value; 3193 u32 value;
3192 struct ql_tx_buf_cb *tx_cb; 3194 struct ql_tx_buf_cb *tx_cb;
@@ -3278,9 +3280,12 @@ static void ql_reset_work(struct ql3_adapter *qdev)
3278 } 3280 }
3279} 3281}
3280 3282
3281static void ql_tx_timeout_work(struct ql3_adapter *qdev) 3283static void ql_tx_timeout_work(struct work_struct *work)
3282{ 3284{
3283 ql_cycle_adapter(qdev,QL_DO_RESET); 3285 struct ql3_adapter *qdev =
3286 container_of(work, struct ql3_adapter, tx_timeout_work.work);
3287
3288 ql_cycle_adapter(qdev, QL_DO_RESET);
3284} 3289}
3285 3290
3286static void ql_get_board_info(struct ql3_adapter *qdev) 3291static void ql_get_board_info(struct ql3_adapter *qdev)
@@ -3459,9 +3464,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3459 netif_stop_queue(ndev); 3464 netif_stop_queue(ndev);
3460 3465
3461 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3466 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3462 INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev); 3467 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3463 INIT_WORK(&qdev->tx_timeout_work, 3468 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3464 (void (*)(void *))ql_tx_timeout_work, qdev);
3465 3469
3466 init_timer(&qdev->adapter_timer); 3470 init_timer(&qdev->adapter_timer);
3467 qdev->adapter_timer.function = ql3xxx_timer; 3471 qdev->adapter_timer.function = ql3xxx_timer;
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 65da2c0bfda6..ea94de7fd071 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1186,8 +1186,8 @@ struct ql3_adapter {
1186 u32 numPorts; 1186 u32 numPorts;
1187 struct net_device_stats stats; 1187 struct net_device_stats stats;
1188 struct workqueue_struct *workqueue; 1188 struct workqueue_struct *workqueue;
1189 struct work_struct reset_work; 1189 struct delayed_work reset_work;
1190 struct work_struct tx_timeout_work; 1190 struct delayed_work tx_timeout_work;
1191 u32 max_frame_size; 1191 u32 max_frame_size;
1192}; 1192};
1193 1193
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 45d3ca431957..85a392fab5cc 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -424,6 +424,7 @@ struct ring_info {
424struct rtl8169_private { 424struct rtl8169_private {
425 void __iomem *mmio_addr; /* memory map physical address */ 425 void __iomem *mmio_addr; /* memory map physical address */
426 struct pci_dev *pci_dev; /* Index of PCI device */ 426 struct pci_dev *pci_dev; /* Index of PCI device */
427 struct net_device *dev;
427 struct net_device_stats stats; /* statistics of net device */ 428 struct net_device_stats stats; /* statistics of net device */
428 spinlock_t lock; /* spin lock flag */ 429 spinlock_t lock; /* spin lock flag */
429 u32 msg_enable; 430 u32 msg_enable;
@@ -455,7 +456,7 @@ struct rtl8169_private {
455 void (*phy_reset_enable)(void __iomem *); 456 void (*phy_reset_enable)(void __iomem *);
456 unsigned int (*phy_reset_pending)(void __iomem *); 457 unsigned int (*phy_reset_pending)(void __iomem *);
457 unsigned int (*link_ok)(void __iomem *); 458 unsigned int (*link_ok)(void __iomem *);
458 struct work_struct task; 459 struct delayed_work task;
459 unsigned wol_enabled : 1; 460 unsigned wol_enabled : 1;
460}; 461};
461 462
@@ -1510,6 +1511,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1510 SET_MODULE_OWNER(dev); 1511 SET_MODULE_OWNER(dev);
1511 SET_NETDEV_DEV(dev, &pdev->dev); 1512 SET_NETDEV_DEV(dev, &pdev->dev);
1512 tp = netdev_priv(dev); 1513 tp = netdev_priv(dev);
1514 tp->dev = dev;
1513 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 1515 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1514 1516
1515 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 1517 /* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1782,7 +1784,7 @@ static int rtl8169_open(struct net_device *dev)
1782 if (retval < 0) 1784 if (retval < 0)
1783 goto err_free_rx; 1785 goto err_free_rx;
1784 1786
1785 INIT_WORK(&tp->task, NULL, dev); 1787 INIT_DELAYED_WORK(&tp->task, NULL);
1786 1788
1787 rtl8169_hw_start(dev); 1789 rtl8169_hw_start(dev);
1788 1790
@@ -2105,11 +2107,11 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
2105 tp->cur_tx = tp->dirty_tx = 0; 2107 tp->cur_tx = tp->dirty_tx = 0;
2106} 2108}
2107 2109
2108static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *)) 2110static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
2109{ 2111{
2110 struct rtl8169_private *tp = netdev_priv(dev); 2112 struct rtl8169_private *tp = netdev_priv(dev);
2111 2113
2112 PREPARE_WORK(&tp->task, task, dev); 2114 PREPARE_DELAYED_WORK(&tp->task, task);
2113 schedule_delayed_work(&tp->task, 4); 2115 schedule_delayed_work(&tp->task, 4);
2114} 2116}
2115 2117
@@ -2128,9 +2130,11 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
2128 netif_poll_enable(dev); 2130 netif_poll_enable(dev);
2129} 2131}
2130 2132
2131static void rtl8169_reinit_task(void *_data) 2133static void rtl8169_reinit_task(struct work_struct *work)
2132{ 2134{
2133 struct net_device *dev = _data; 2135 struct rtl8169_private *tp =
2136 container_of(work, struct rtl8169_private, task.work);
2137 struct net_device *dev = tp->dev;
2134 int ret; 2138 int ret;
2135 2139
2136 if (netif_running(dev)) { 2140 if (netif_running(dev)) {
@@ -2153,10 +2157,11 @@ static void rtl8169_reinit_task(void *_data)
2153 } 2157 }
2154} 2158}
2155 2159
2156static void rtl8169_reset_task(void *_data) 2160static void rtl8169_reset_task(struct work_struct *work)
2157{ 2161{
2158 struct net_device *dev = _data; 2162 struct rtl8169_private *tp =
2159 struct rtl8169_private *tp = netdev_priv(dev); 2163 container_of(work, struct rtl8169_private, task.work);
2164 struct net_device *dev = tp->dev;
2160 2165
2161 if (!netif_running(dev)) 2166 if (!netif_running(dev))
2162 return; 2167 return;
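The r8169 hunks additionally show the typed replacement for the old bare function pointers: handlers are passed around as work_func_t, and a handler can be swapped onto an already-initialised delayed item with PREPARE_DELAYED_WORK() before it is scheduled. A sketch of that helper shape, names hypothetical:

	#include <linux/workqueue.h>

	struct foo_priv {
		struct delayed_work task;
	};

	static void foo_schedule_work(struct foo_priv *tp, work_func_t task)
	{
		PREPARE_DELAYED_WORK(&tp->task, task);	/* set or replace the handler */
		schedule_delayed_work(&tp->task, 4);	/* run in ~4 jiffies */
	}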
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 33569ec9dbfc..250cdbeefdfd 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5872,9 +5872,9 @@ static void s2io_tasklet(unsigned long dev_addr)
5872 * Description: Sets the link status for the adapter 5872 * Description: Sets the link status for the adapter
5873 */ 5873 */
5874 5874
5875static void s2io_set_link(unsigned long data) 5875static void s2io_set_link(struct work_struct *work)
5876{ 5876{
5877 nic_t *nic = (nic_t *) data; 5877 nic_t *nic = container_of(work, nic_t, set_link_task);
5878 struct net_device *dev = nic->dev; 5878 struct net_device *dev = nic->dev;
5879 XENA_dev_config_t __iomem *bar0 = nic->bar0; 5879 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5880 register u64 val64; 5880 register u64 val64;
@@ -6379,10 +6379,10 @@ static int s2io_card_up(nic_t * sp)
6379 * spin lock. 6379 * spin lock.
6380 */ 6380 */
6381 6381
6382static void s2io_restart_nic(unsigned long data) 6382static void s2io_restart_nic(struct work_struct *work)
6383{ 6383{
6384 struct net_device *dev = (struct net_device *) data; 6384 nic_t *sp = container_of(work, nic_t, rst_timer_task);
6385 nic_t *sp = dev->priv; 6385 struct net_device *dev = sp->dev;
6386 6386
6387 s2io_card_down(sp); 6387 s2io_card_down(sp);
6388 if (s2io_card_up(sp)) { 6388 if (s2io_card_up(sp)) {
@@ -6992,10 +6992,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6992 6992
6993 dev->tx_timeout = &s2io_tx_watchdog; 6993 dev->tx_timeout = &s2io_tx_watchdog;
6994 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 6994 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
6995 INIT_WORK(&sp->rst_timer_task, 6995 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
6996 (void (*)(void *)) s2io_restart_nic, dev); 6996 INIT_WORK(&sp->set_link_task, s2io_set_link);
6997 INIT_WORK(&sp->set_link_task,
6998 (void (*)(void *)) s2io_set_link, sp);
6999 6997
7000 pci_save_state(sp->pdev); 6998 pci_save_state(sp->pdev);
7001 6999
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 12b719f4d00f..3b0bafd273c8 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1000,7 +1000,7 @@ s2io_msix_fifo_handle(int irq, void *dev_id);
1000static irqreturn_t s2io_isr(int irq, void *dev_id); 1000static irqreturn_t s2io_isr(int irq, void *dev_id);
1001static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); 1001static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
1002static const struct ethtool_ops netdev_ethtool_ops; 1002static const struct ethtool_ops netdev_ethtool_ops;
1003static void s2io_set_link(unsigned long data); 1003static void s2io_set_link(struct work_struct *work);
1004static int s2io_set_swapper(nic_t * sp); 1004static int s2io_set_swapper(nic_t * sp);
1005static void s2io_card_down(nic_t *nic); 1005static void s2io_card_down(nic_t *nic);
1006static int s2io_card_up(nic_t *nic); 1006static int s2io_card_up(nic_t *nic);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index aaba458584fb..b70ed79d4121 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -280,6 +280,7 @@ enum sis190_feature {
280struct sis190_private { 280struct sis190_private {
281 void __iomem *mmio_addr; 281 void __iomem *mmio_addr;
282 struct pci_dev *pci_dev; 282 struct pci_dev *pci_dev;
283 struct net_device *dev;
283 struct net_device_stats stats; 284 struct net_device_stats stats;
284 spinlock_t lock; 285 spinlock_t lock;
285 u32 rx_buf_sz; 286 u32 rx_buf_sz;
@@ -897,10 +898,11 @@ static void sis190_hw_start(struct net_device *dev)
897 netif_start_queue(dev); 898 netif_start_queue(dev);
898} 899}
899 900
900static void sis190_phy_task(void * data) 901static void sis190_phy_task(struct work_struct *work)
901{ 902{
902 struct net_device *dev = data; 903 struct sis190_private *tp =
903 struct sis190_private *tp = netdev_priv(dev); 904 container_of(work, struct sis190_private, phy_task);
905 struct net_device *dev = tp->dev;
904 void __iomem *ioaddr = tp->mmio_addr; 906 void __iomem *ioaddr = tp->mmio_addr;
905 int phy_id = tp->mii_if.phy_id; 907 int phy_id = tp->mii_if.phy_id;
906 u16 val; 908 u16 val;
@@ -1047,7 +1049,7 @@ static int sis190_open(struct net_device *dev)
1047 if (rc < 0) 1049 if (rc < 0)
1048 goto err_free_rx_1; 1050 goto err_free_rx_1;
1049 1051
1050 INIT_WORK(&tp->phy_task, sis190_phy_task, dev); 1052 INIT_WORK(&tp->phy_task, sis190_phy_task);
1051 1053
1052 sis190_request_timer(dev); 1054 sis190_request_timer(dev);
1053 1055
@@ -1436,6 +1438,7 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1436 SET_NETDEV_DEV(dev, &pdev->dev); 1438 SET_NETDEV_DEV(dev, &pdev->dev);
1437 1439
1438 tp = netdev_priv(dev); 1440 tp = netdev_priv(dev);
1441 tp->dev = dev;
1439 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); 1442 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1440 1443
1441 rc = pci_enable_device(pdev); 1444 rc = pci_enable_device(pdev);
@@ -1798,7 +1801,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1798 1801
1799 sis190_init_rxfilter(dev); 1802 sis190_init_rxfilter(dev);
1800 1803
1801 INIT_WORK(&tp->phy_task, sis190_phy_task, dev); 1804 INIT_WORK(&tp->phy_task, sis190_phy_task);
1802 1805
1803 dev->open = sis190_open; 1806 dev->open = sis190_open;
1804 dev->stop = sis190_close; 1807 dev->stop = sis190_close;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5513907e8393..b60f0451f6cd 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1327,10 +1327,11 @@ static void xm_check_link(struct net_device *dev)
1327 * Since internal PHY is wired to a level triggered pin, can't 1327 * Since internal PHY is wired to a level triggered pin, can't
1328 * get an interrupt when carrier is detected. 1328 * get an interrupt when carrier is detected.
1329 */ 1329 */
1330static void xm_link_timer(void *arg) 1330static void xm_link_timer(struct work_struct *work)
1331{ 1331{
1332 struct net_device *dev = arg; 1332 struct skge_port *skge =
1333 struct skge_port *skge = netdev_priv(arg); 1333 container_of(work, struct skge_port, link_thread.work);
1334 struct net_device *dev = skge->netdev;
1334 struct skge_hw *hw = skge->hw; 1335 struct skge_hw *hw = skge->hw;
1335 int port = skge->port; 1336 int port = skge->port;
1336 1337
@@ -3072,9 +3073,9 @@ static void skge_error_irq(struct skge_hw *hw)
3072 * because accessing phy registers requires spin wait which might 3073 * because accessing phy registers requires spin wait which might
3073 * cause excess interrupt latency. 3074 * cause excess interrupt latency.
3074 */ 3075 */
3075static void skge_extirq(void *arg) 3076static void skge_extirq(struct work_struct *work)
3076{ 3077{
3077 struct skge_hw *hw = arg; 3078 struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
3078 int port; 3079 int port;
3079 3080
3080 mutex_lock(&hw->phy_mutex); 3081 mutex_lock(&hw->phy_mutex);
@@ -3456,7 +3457,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3456 skge->port = port; 3457 skge->port = port;
3457 3458
3458 /* Only used for Genesis XMAC */ 3459 /* Only used for Genesis XMAC */
3459 INIT_WORK(&skge->link_thread, xm_link_timer, dev); 3460 INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
3460 3461
3461 if (hw->chip_id != CHIP_ID_GENESIS) { 3462 if (hw->chip_id != CHIP_ID_GENESIS) {
3462 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3463 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3543,7 +3544,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3543 3544
3544 hw->pdev = pdev; 3545 hw->pdev = pdev;
3545 mutex_init(&hw->phy_mutex); 3546 mutex_init(&hw->phy_mutex);
3546 INIT_WORK(&hw->phy_work, skge_extirq, hw); 3547 INIT_WORK(&hw->phy_work, skge_extirq);
3547 spin_lock_init(&hw->hw_lock); 3548 spin_lock_init(&hw->hw_lock);
3548 3549
3549 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3550 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 537c0aaa1db8..23e5275d920c 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2456,7 +2456,7 @@ struct skge_port {
2456 2456
2457 struct net_device_stats net_stats; 2457 struct net_device_stats net_stats;
2458 2458
2459 struct work_struct link_thread; 2459 struct delayed_work link_thread;
2460 enum pause_control flow_control; 2460 enum pause_control flow_control;
2461 enum pause_status flow_status; 2461 enum pause_status flow_status;
2462 u8 rx_csum; 2462 u8 rx_csum;
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 95b6478f55c6..e62a9586fb95 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -210,6 +210,7 @@ struct smc_local {
210 210
211 /* work queue */ 211 /* work queue */
212 struct work_struct phy_configure; 212 struct work_struct phy_configure;
213 struct net_device *dev;
213 int work_pending; 214 int work_pending;
214 215
215 spinlock_t lock; 216 spinlock_t lock;
@@ -1114,10 +1115,11 @@ static void smc_phy_check_media(struct net_device *dev, int init)
1114 * of autonegotiation.) If the RPC ANEG bit is cleared, the selection 1115 * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
1115 * is controlled by the RPC SPEED and RPC DPLX bits. 1116 * is controlled by the RPC SPEED and RPC DPLX bits.
1116 */ 1117 */
1117static void smc_phy_configure(void *data) 1118static void smc_phy_configure(struct work_struct *work)
1118{ 1119{
1119 struct net_device *dev = data; 1120 struct smc_local *lp =
1120 struct smc_local *lp = netdev_priv(dev); 1121 container_of(work, struct smc_local, phy_configure);
1122 struct net_device *dev = lp->dev;
1121 void __iomem *ioaddr = lp->base; 1123 void __iomem *ioaddr = lp->base;
1122 int phyaddr = lp->mii.phy_id; 1124 int phyaddr = lp->mii.phy_id;
1123 int my_phy_caps; /* My PHY capabilities */ 1125 int my_phy_caps; /* My PHY capabilities */
@@ -1592,7 +1594,7 @@ smc_open(struct net_device *dev)
1592 1594
1593 /* Configure the PHY, initialize the link state */ 1595 /* Configure the PHY, initialize the link state */
1594 if (lp->phy_type != 0) 1596 if (lp->phy_type != 0)
1595 smc_phy_configure(dev); 1597 smc_phy_configure(&lp->phy_configure);
1596 else { 1598 else {
1597 spin_lock_irq(&lp->lock); 1599 spin_lock_irq(&lp->lock);
1598 smc_10bt_check_media(dev, 1); 1600 smc_10bt_check_media(dev, 1);
@@ -1972,7 +1974,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
1972#endif 1974#endif
1973 1975
1974 tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); 1976 tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
1975 INIT_WORK(&lp->phy_configure, smc_phy_configure, dev); 1977 INIT_WORK(&lp->phy_configure, smc_phy_configure);
1978 lp->dev = dev;
1976 lp->mii.phy_id_mask = 0x1f; 1979 lp->mii.phy_id_mask = 0x1f;
1977 lp->mii.reg_num_mask = 0x1f; 1980 lp->mii.reg_num_mask = 0x1f;
1978 lp->mii.force_media = 0; 1981 lp->mii.force_media = 0;
@@ -2322,7 +2325,7 @@ static int smc_drv_resume(struct platform_device *dev)
2322 smc_reset(ndev); 2325 smc_reset(ndev);
2323 smc_enable(ndev); 2326 smc_enable(ndev);
2324 if (lp->phy_type != 0) 2327 if (lp->phy_type != 0)
2325 smc_phy_configure(ndev); 2328 smc_phy_configure(&lp->phy_configure);
2326 netif_device_attach(ndev); 2329 netif_device_attach(ndev);
2327 } 2330 }
2328 } 2331 }
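
Illustrative aside (not part of the patch): where the old callback argument was a struct net_device * but the work item lives in the driver's private data, the conversion adds a back-pointer so the handler can still reach the device, as smc91x does above and tlan/tulip do below. A sketch, all names hypothetical:

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct my_local {
            struct work_struct phy_configure;
            struct net_device *dev;         /* back-pointer added by the conversion */
    };

    static void my_phy_configure(struct work_struct *work)
    {
            struct my_local *lp = container_of(work, struct my_local, phy_configure);
            struct net_device *dev = lp->dev;       /* what void *data used to carry */

            /* ... program the PHY through dev ... */
    }

    static void my_setup(struct net_device *dev)
    {
            struct my_local *lp = netdev_priv(dev);

            INIT_WORK(&lp->phy_configure, my_phy_configure);
            lp->dev = dev;
    }

Callers that used to invoke the handler directly with dev now hand it the embedded work item instead (compare smc_open() and smc_drv_resume() above).
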
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 13e0a43e423b..ebb6aa39f9c7 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1939,10 +1939,11 @@ spider_net_stop(struct net_device *netdev)
1939 * called as task when tx hangs, resets interface (if interface is up) 1939 * called as task when tx hangs, resets interface (if interface is up)
1940 */ 1940 */
1941static void 1941static void
1942spider_net_tx_timeout_task(void *data) 1942spider_net_tx_timeout_task(struct work_struct *work)
1943{ 1943{
1944 struct net_device *netdev = data; 1944 struct spider_net_card *card =
1945 struct spider_net_card *card = netdev_priv(netdev); 1945 container_of(work, struct spider_net_card, tx_timeout_task);
1946 struct net_device *netdev = card->netdev;
1946 1947
1947 if (!(netdev->flags & IFF_UP)) 1948 if (!(netdev->flags & IFF_UP))
1948 goto out; 1949 goto out;
@@ -2116,7 +2117,7 @@ spider_net_alloc_card(void)
2116 card = netdev_priv(netdev); 2117 card = netdev_priv(netdev);
2117 card->netdev = netdev; 2118 card->netdev = netdev;
2118 card->msg_enable = SPIDER_NET_DEFAULT_MSG; 2119 card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2119 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev); 2120 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
2120 init_waitqueue_head(&card->waitq); 2121 init_waitqueue_head(&card->waitq);
2121 atomic_set(&card->tx_timeout_task_counter, 0); 2122 atomic_set(&card->tx_timeout_task_counter, 0);
2122 2123
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index cf44e72399b9..785e4a535f9e 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2282,9 +2282,9 @@ static void gem_do_stop(struct net_device *dev, int wol)
2282 } 2282 }
2283} 2283}
2284 2284
2285static void gem_reset_task(void *data) 2285static void gem_reset_task(struct work_struct *work)
2286{ 2286{
2287 struct gem *gp = (struct gem *) data; 2287 struct gem *gp = container_of(work, struct gem, reset_task);
2288 2288
2289 mutex_lock(&gp->pm_mutex); 2289 mutex_lock(&gp->pm_mutex);
2290 2290
@@ -3044,7 +3044,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3044 gp->link_timer.function = gem_link_timer; 3044 gp->link_timer.function = gem_link_timer;
3045 gp->link_timer.data = (unsigned long) gp; 3045 gp->link_timer.data = (unsigned long) gp;
3046 3046
3047 INIT_WORK(&gp->reset_task, gem_reset_task, gp); 3047 INIT_WORK(&gp->reset_task, gem_reset_task);
3048 3048
3049 gp->lstate = link_down; 3049 gp->lstate = link_down;
3050 gp->timer_ticks = 0; 3050 gp->timer_ticks = 0;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index c20bb998e0e5..d9123c9adc1e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3654,9 +3654,9 @@ static void tg3_poll_controller(struct net_device *dev)
3654} 3654}
3655#endif 3655#endif
3656 3656
3657static void tg3_reset_task(void *_data) 3657static void tg3_reset_task(struct work_struct *work)
3658{ 3658{
3659 struct tg3 *tp = _data; 3659 struct tg3 *tp = container_of(work, struct tg3, reset_task);
3660 unsigned int restart_timer; 3660 unsigned int restart_timer;
3661 3661
3662 tg3_full_lock(tp, 0); 3662 tg3_full_lock(tp, 0);
@@ -11734,7 +11734,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11734#endif 11734#endif
11735 spin_lock_init(&tp->lock); 11735 spin_lock_init(&tp->lock);
11736 spin_lock_init(&tp->indirect_lock); 11736 spin_lock_init(&tp->indirect_lock);
11737 INIT_WORK(&tp->reset_task, tg3_reset_task, tp); 11737 INIT_WORK(&tp->reset_task, tg3_reset_task);
11738 11738
11739 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); 11739 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11740 if (tp->regs == 0UL) { 11740 if (tp->regs == 0UL) {
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index e14f5a00f65a..f85f00251123 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -296,6 +296,7 @@ static void TLan_SetMulticastList( struct net_device *);
296static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 296static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
297static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); 297static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
298static void TLan_tx_timeout( struct net_device *dev); 298static void TLan_tx_timeout( struct net_device *dev);
299static void TLan_tx_timeout_work(struct work_struct *work);
299static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 300static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
300 301
301static u32 TLan_HandleInvalid( struct net_device *, u16 ); 302static u32 TLan_HandleInvalid( struct net_device *, u16 );
@@ -562,6 +563,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
562 priv = netdev_priv(dev); 563 priv = netdev_priv(dev);
563 564
564 priv->pciDev = pdev; 565 priv->pciDev = pdev;
566 priv->dev = dev;
565 567
566 /* Is this a PCI device? */ 568 /* Is this a PCI device? */
567 if (pdev) { 569 if (pdev) {
@@ -634,7 +636,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
634 636
635 /* This will be used when we get an adapter error from 637 /* This will be used when we get an adapter error from
636 * within our irq handler */ 638 * within our irq handler */
637 INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev); 639 INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
638 640
639 spin_lock_init(&priv->lock); 641 spin_lock_init(&priv->lock);
640 642
@@ -1040,6 +1042,25 @@ static void TLan_tx_timeout(struct net_device *dev)
1040} 1042}
1041 1043
1042 1044
1045 /***************************************************************
1046 * TLan_tx_timeout_work
1047 *
1048 * Returns: nothing
1049 *
1050 * Params:
1051 * work work item of device which timed out
1052 *
1053 **************************************************************/
1054
1055static void TLan_tx_timeout_work(struct work_struct *work)
1056{
1057 TLanPrivateInfo *priv =
1058 container_of(work, TLanPrivateInfo, tlan_tqueue);
1059
1060 TLan_tx_timeout(priv->dev);
1061}
1062
1063
1043 1064
1044 /*************************************************************** 1065 /***************************************************************
1045 * TLan_StartTx 1066 * TLan_StartTx
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index a44e2f2ef62a..41ce0b665937 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -170,6 +170,7 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
170typedef struct tlan_private_tag { 170typedef struct tlan_private_tag {
171 struct net_device *nextDevice; 171 struct net_device *nextDevice;
172 struct pci_dev *pciDev; 172 struct pci_dev *pciDev;
173 struct net_device *dev;
173 void *dmaStorage; 174 void *dmaStorage;
174 dma_addr_t dmaStorageDMA; 175 dma_addr_t dmaStorageDMA;
175 unsigned int dmaSize; 176 unsigned int dmaSize;
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index fa3a2bb105ad..942b839ccc5b 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -26,10 +26,11 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
26 26
27/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list 27/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
28 of available transceivers. */ 28 of available transceivers. */
29void t21142_media_task(void *data) 29void t21142_media_task(struct work_struct *work)
30{ 30{
31 struct net_device *dev = data; 31 struct tulip_private *tp =
32 struct tulip_private *tp = netdev_priv(dev); 32 container_of(work, struct tulip_private, media_work);
33 struct net_device *dev = tp->dev;
33 void __iomem *ioaddr = tp->base_addr; 34 void __iomem *ioaddr = tp->base_addr;
34 int csr12 = ioread32(ioaddr + CSR12); 35 int csr12 = ioread32(ioaddr + CSR12);
35 int next_tick = 60*HZ; 36 int next_tick = 60*HZ;
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 066e5d6bcbd8..df326fe1cc8f 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -18,10 +18,11 @@
18#include "tulip.h" 18#include "tulip.h"
19 19
20 20
21void tulip_media_task(void *data) 21void tulip_media_task(struct work_struct *work)
22{ 22{
23 struct net_device *dev = data; 23 struct tulip_private *tp =
24 struct tulip_private *tp = netdev_priv(dev); 24 container_of(work, struct tulip_private, media_work);
25 struct net_device *dev = tp->dev;
25 void __iomem *ioaddr = tp->base_addr; 26 void __iomem *ioaddr = tp->base_addr;
26 u32 csr12 = ioread32(ioaddr + CSR12); 27 u32 csr12 = ioread32(ioaddr + CSR12);
27 int next_tick = 2*HZ; 28 int next_tick = 2*HZ;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index ad107f45c7b1..25f25da76917 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -44,7 +44,7 @@ struct tulip_chip_table {
44 int valid_intrs; /* CSR7 interrupt enable settings */ 44 int valid_intrs; /* CSR7 interrupt enable settings */
45 int flags; 45 int flags;
46 void (*media_timer) (unsigned long); 46 void (*media_timer) (unsigned long);
47 void (*media_task) (void *); 47 work_func_t media_task;
48}; 48};
49 49
50 50
@@ -392,6 +392,7 @@ struct tulip_private {
392 int csr12_shadow; 392 int csr12_shadow;
393 int pad0; /* Used for 8-byte alignment */ 393 int pad0; /* Used for 8-byte alignment */
394 struct work_struct media_work; 394 struct work_struct media_work;
395 struct net_device *dev;
395}; 396};
396 397
397 398
@@ -406,7 +407,7 @@ struct eeprom_fixup {
406 407
407/* 21142.c */ 408/* 21142.c */
408extern u16 t21142_csr14[]; 409extern u16 t21142_csr14[];
409void t21142_media_task(void *data); 410void t21142_media_task(struct work_struct *work);
410void t21142_start_nway(struct net_device *dev); 411void t21142_start_nway(struct net_device *dev);
411void t21142_lnk_change(struct net_device *dev, int csr5); 412void t21142_lnk_change(struct net_device *dev, int csr5);
412 413
@@ -444,7 +445,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5);
444void pnic_timer(unsigned long data); 445void pnic_timer(unsigned long data);
445 446
446/* timer.c */ 447/* timer.c */
447void tulip_media_task(void *data); 448void tulip_media_task(struct work_struct *work);
448void mxic_timer(unsigned long data); 449void mxic_timer(unsigned long data);
449void comet_timer(unsigned long data); 450void comet_timer(unsigned long data);
450 451
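
Illustrative aside (not part of the patch): tulip keeps its per-chip media task in a table, so the field type changes from void (*)(void *) to the new work_func_t and the table entry is handed straight to INIT_WORK(), as tulip_core.c does below. A sketch, all names hypothetical:

    #include <linux/workqueue.h>

    struct my_priv {
            struct work_struct media_work;
    };

    static void generic_media_task(struct work_struct *work)
    {
            struct my_priv *tp = container_of(work, struct my_priv, media_work);

            /* ... poll media state for tp ... */
    }

    struct my_chip_table {
            const char *name;
            work_func_t media_task;         /* void (*)(struct work_struct *) */
    };

    static const struct my_chip_table my_tbl[] = {
            { "example-chip", generic_media_task },
    };

    static void my_init_one(struct my_priv *tp, int chip_id)
    {
            /* the table entry is already the right handler type */
            INIT_WORK(&tp->media_work, my_tbl[chip_id].media_task);
    }
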
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0aee618f883c..5a35354aa523 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1367,6 +1367,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1367 * it is zeroed and aligned in alloc_etherdev 1367 * it is zeroed and aligned in alloc_etherdev
1368 */ 1368 */
1369 tp = netdev_priv(dev); 1369 tp = netdev_priv(dev);
1370 tp->dev = dev;
1370 1371
1371 tp->rx_ring = pci_alloc_consistent(pdev, 1372 tp->rx_ring = pci_alloc_consistent(pdev,
1372 sizeof(struct tulip_rx_desc) * RX_RING_SIZE + 1373 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
@@ -1389,7 +1390,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1389 tp->timer.data = (unsigned long)dev; 1390 tp->timer.data = (unsigned long)dev;
1390 tp->timer.function = tulip_tbl[tp->chip_id].media_timer; 1391 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1391 1392
1392 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev); 1393 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1393 1394
1394 dev->base_addr = (unsigned long)ioaddr; 1395 dev->base_addr = (unsigned long)ioaddr;
1395 1396
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 931cbdf6d791..b2a23aed4428 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -125,8 +125,8 @@ static int cpc_tty_write_room(struct tty_struct *tty);
125static int cpc_tty_chars_in_buffer(struct tty_struct *tty); 125static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
126static void cpc_tty_flush_buffer(struct tty_struct *tty); 126static void cpc_tty_flush_buffer(struct tty_struct *tty);
127static void cpc_tty_hangup(struct tty_struct *tty); 127static void cpc_tty_hangup(struct tty_struct *tty);
128static void cpc_tty_rx_work(void *data); 128static void cpc_tty_rx_work(struct work_struct *work);
129static void cpc_tty_tx_work(void *data); 129static void cpc_tty_tx_work(struct work_struct *work);
130static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); 130static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
131static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); 131static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
132static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); 132static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
@@ -261,8 +261,8 @@ void cpc_tty_init(pc300dev_t *pc300dev)
261 cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; 261 cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
262 cpc_tty->pc300dev = pc300dev; 262 cpc_tty->pc300dev = pc300dev;
263 263
264 INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty); 264 INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
265 INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port); 265 INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
266 266
267 cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL; 267 cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
268 268
@@ -659,21 +659,23 @@ static void cpc_tty_hangup(struct tty_struct *tty)
659 * o call the line disc. read 659 * o call the line disc. read
660 * o free memory 660 * o free memory
661 */ 661 */
662static void cpc_tty_rx_work(void * data) 662static void cpc_tty_rx_work(struct work_struct *work)
663{ 663{
664 st_cpc_tty_area *cpc_tty;
664 unsigned long port; 665 unsigned long port;
665 int i, j; 666 int i, j;
666 st_cpc_tty_area *cpc_tty;
667 volatile st_cpc_rx_buf *buf; 667 volatile st_cpc_rx_buf *buf;
668 char flags=0,flg_rx=1; 668 char flags=0,flg_rx=1;
669 struct tty_ldisc *ld; 669 struct tty_ldisc *ld;
670 670
671 if (cpc_tty_cnt == 0) return; 671 if (cpc_tty_cnt == 0) return;
672
673 672
674 for (i=0; (i < 4) && flg_rx ; i++) { 673 for (i=0; (i < 4) && flg_rx ; i++) {
675 flg_rx = 0; 674 flg_rx = 0;
676 port = (unsigned long)data; 675
676 cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
677 port = cpc_tty - cpc_tty_area;
678
677 for (j=0; j < CPC_TTY_NPORTS; j++) { 679 for (j=0; j < CPC_TTY_NPORTS; j++) {
678 cpc_tty = &cpc_tty_area[port]; 680 cpc_tty = &cpc_tty_area[port];
679 681
@@ -882,9 +884,10 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
882 * o if need call line discipline wakeup 884 * o if need call line discipline wakeup
883 * o call wake_up_interruptible 885 * o call wake_up_interruptible
884 */ 886 */
885static void cpc_tty_tx_work(void *data) 887static void cpc_tty_tx_work(struct work_struct *work)
886{ 888{
887 st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; 889 st_cpc_tty_area *cpc_tty =
890 container_of(work, st_cpc_tty_area, tty_tx_work);
888 struct tty_struct *tty; 891 struct tty_struct *tty;
889 892
890 CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name); 893 CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index ac9437d497f0..f12355398fe7 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -219,21 +219,6 @@ static int airo_config(struct pcmcia_device *link)
219 dev = link->priv; 219 dev = link->priv;
220 220
221 DEBUG(0, "airo_config(0x%p)\n", link); 221 DEBUG(0, "airo_config(0x%p)\n", link);
222
223 /*
224 This reads the card's CONFIG tuple to find its configuration
225 registers.
226 */
227 tuple.DesiredTuple = CISTPL_CONFIG;
228 tuple.Attributes = 0;
229 tuple.TupleData = buf;
230 tuple.TupleDataMax = sizeof(buf);
231 tuple.TupleOffset = 0;
232 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
233 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
234 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
235 link->conf.ConfigBase = parse.config.base;
236 link->conf.Present = parse.config.rmask[0];
237 222
238 /* 223 /*
239 In this loop, we scan the CIS for configuration table entries, 224 In this loop, we scan the CIS for configuration table entries,
@@ -247,6 +232,10 @@ static int airo_config(struct pcmcia_device *link)
247 these things without consulting the CIS, and most client drivers 232 these things without consulting the CIS, and most client drivers
248 will only use the CIS to fill in implementation-defined details. 233 will only use the CIS to fill in implementation-defined details.
249 */ 234 */
235 tuple.Attributes = 0;
236 tuple.TupleData = buf;
237 tuple.TupleDataMax = sizeof(buf);
238 tuple.TupleOffset = 0;
250 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 239 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
251 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 240 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
252 while (1) { 241 while (1) {
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 5c410989c4d7..12617cd0b78e 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -244,17 +244,6 @@ static int atmel_config(struct pcmcia_device *link)
244 tuple.TupleOffset = 0; 244 tuple.TupleOffset = 0;
245 245
246 /* 246 /*
247 This reads the card's CONFIG tuple to find its configuration
248 registers.
249 */
250 tuple.DesiredTuple = CISTPL_CONFIG;
251 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
252 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
253 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
254 link->conf.ConfigBase = parse.config.base;
255 link->conf.Present = parse.config.rmask[0];
256
257 /*
258 In this loop, we scan the CIS for configuration table entries, 247 In this loop, we scan the CIS for configuration table entries,
259 each of which describes a valid card configuration, including 248 each of which describes a valid card configuration, including
260 voltage, IO window, memory window, and interrupt settings. 249 voltage, IO window, memory window, and interrupt settings.
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 94dfb92fab5c..8286678513b9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -819,7 +819,7 @@ struct bcm43xx_private {
819 struct tasklet_struct isr_tasklet; 819 struct tasklet_struct isr_tasklet;
820 820
821 /* Periodic tasks */ 821 /* Periodic tasks */
822 struct work_struct periodic_work; 822 struct delayed_work periodic_work;
823 unsigned int periodic_state; 823 unsigned int periodic_state;
824 824
825 struct work_struct restart_work; 825 struct work_struct restart_work;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 5b3c27359a18..2ec2e5afce67 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3215,9 +3215,10 @@ static void do_periodic_work(struct bcm43xx_private *bcm)
3215 schedule_delayed_work(&bcm->periodic_work, HZ * 15); 3215 schedule_delayed_work(&bcm->periodic_work, HZ * 15);
3216} 3216}
3217 3217
3218static void bcm43xx_periodic_work_handler(void *d) 3218static void bcm43xx_periodic_work_handler(struct work_struct *work)
3219{ 3219{
3220 struct bcm43xx_private *bcm = d; 3220 struct bcm43xx_private *bcm =
3221 container_of(work, struct bcm43xx_private, periodic_work.work);
3221 struct net_device *net_dev = bcm->net_dev; 3222 struct net_device *net_dev = bcm->net_dev;
3222 unsigned long flags; 3223 unsigned long flags;
3223 u32 savedirqs = 0; 3224 u32 savedirqs = 0;
@@ -3279,11 +3280,11 @@ void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
3279 3280
3280void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) 3281void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
3281{ 3282{
3282 struct work_struct *work = &(bcm->periodic_work); 3283 struct delayed_work *work = &bcm->periodic_work;
3283 3284
3284 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); 3285 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
3285 INIT_WORK(work, bcm43xx_periodic_work_handler, bcm); 3286 INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
3286 schedule_work(work); 3287 schedule_delayed_work(work, 0);
3287} 3288}
3288 3289
3289static void bcm43xx_security_init(struct bcm43xx_private *bcm) 3290static void bcm43xx_security_init(struct bcm43xx_private *bcm)
@@ -3635,7 +3636,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
3635 bcm43xx_periodic_tasks_setup(bcm); 3636 bcm43xx_periodic_tasks_setup(bcm);
3636 3637
3637 /*FIXME: This should be handled by softmac instead. */ 3638 /*FIXME: This should be handled by softmac instead. */
3638 schedule_work(&bcm->softmac->associnfo.work); 3639 schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
3639 3640
3640out: 3641out:
3641 mutex_unlock(&(bcm)->mutex); 3642 mutex_unlock(&(bcm)->mutex);
@@ -4182,9 +4183,10 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev)
4182/* Hard-reset the chip. Do not call this directly. 4183/* Hard-reset the chip. Do not call this directly.
4183 * Use bcm43xx_controller_restart() 4184 * Use bcm43xx_controller_restart()
4184 */ 4185 */
4185static void bcm43xx_chip_reset(void *_bcm) 4186static void bcm43xx_chip_reset(struct work_struct *work)
4186{ 4187{
4187 struct bcm43xx_private *bcm = _bcm; 4188 struct bcm43xx_private *bcm =
4189 container_of(work, struct bcm43xx_private, restart_work);
4188 struct bcm43xx_phyinfo *phy; 4190 struct bcm43xx_phyinfo *phy;
4189 int err = -ENODEV; 4191 int err = -ENODEV;
4190 4192
@@ -4211,7 +4213,7 @@ void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason)
4211 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) 4213 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
4212 return; 4214 return;
4213 printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); 4215 printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
4214 INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm); 4216 INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
4215 schedule_work(&bcm->restart_work); 4217 schedule_work(&bcm->restart_work);
4216} 4218}
4217 4219
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index e663518bd570..e89c890d16fd 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -35,7 +35,7 @@ int hostap_80211_get_hdrlen(u16 fc);
35struct net_device_stats *hostap_get_stats(struct net_device *dev); 35struct net_device_stats *hostap_get_stats(struct net_device *dev);
36void hostap_setup_dev(struct net_device *dev, local_info_t *local, 36void hostap_setup_dev(struct net_device *dev, local_info_t *local,
37 int main_dev); 37 int main_dev);
38void hostap_set_multicast_list_queue(void *data); 38void hostap_set_multicast_list_queue(struct work_struct *work);
39int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); 39int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
40int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); 40int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
41void hostap_cleanup(local_info_t *local); 41void hostap_cleanup(local_info_t *local);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index ba13125024cb..08bc57a4b895 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -49,10 +49,10 @@ MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs "
49static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); 49static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
50static void hostap_event_expired_sta(struct net_device *dev, 50static void hostap_event_expired_sta(struct net_device *dev,
51 struct sta_info *sta); 51 struct sta_info *sta);
52static void handle_add_proc_queue(void *data); 52static void handle_add_proc_queue(struct work_struct *work);
53 53
54#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 54#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
55static void handle_wds_oper_queue(void *data); 55static void handle_wds_oper_queue(struct work_struct *work);
56static void prism2_send_mgmt(struct net_device *dev, 56static void prism2_send_mgmt(struct net_device *dev,
57 u16 type_subtype, char *body, 57 u16 type_subtype, char *body,
58 int body_len, u8 *addr, u16 tx_cb_idx); 58 int body_len, u8 *addr, u16 tx_cb_idx);
@@ -807,7 +807,7 @@ void hostap_init_data(local_info_t *local)
807 INIT_LIST_HEAD(&ap->sta_list); 807 INIT_LIST_HEAD(&ap->sta_list);
808 808
809 /* Initialize task queue structure for AP management */ 809 /* Initialize task queue structure for AP management */
810 INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap); 810 INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
811 811
812 ap->tx_callback_idx = 812 ap->tx_callback_idx =
813 hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); 813 hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
@@ -815,7 +815,7 @@ void hostap_init_data(local_info_t *local)
815 printk(KERN_WARNING "%s: failed to register TX callback for " 815 printk(KERN_WARNING "%s: failed to register TX callback for "
816 "AP\n", local->dev->name); 816 "AP\n", local->dev->name);
817#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 817#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
818 INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local); 818 INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
819 819
820 ap->tx_callback_auth = 820 ap->tx_callback_auth =
821 hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); 821 hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
@@ -1062,9 +1062,10 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off,
1062} 1062}
1063 1063
1064 1064
1065static void handle_add_proc_queue(void *data) 1065static void handle_add_proc_queue(struct work_struct *work)
1066{ 1066{
1067 struct ap_data *ap = (struct ap_data *) data; 1067 struct ap_data *ap = container_of(work, struct ap_data,
1068 add_sta_proc_queue);
1068 struct sta_info *sta; 1069 struct sta_info *sta;
1069 char name[20]; 1070 char name[20];
1070 struct add_sta_proc_data *entry, *prev; 1071 struct add_sta_proc_data *entry, *prev;
@@ -1952,9 +1953,11 @@ static void handle_pspoll(local_info_t *local,
1952 1953
1953#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 1954#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
1954 1955
1955static void handle_wds_oper_queue(void *data) 1956static void handle_wds_oper_queue(struct work_struct *work)
1956{ 1957{
1957 local_info_t *local = data; 1958 struct ap_data *ap = container_of(work, struct ap_data,
1959 wds_oper_queue);
1960 local_info_t *local = ap->local;
1958 struct wds_oper_data *entry, *prev; 1961 struct wds_oper_data *entry, *prev;
1959 1962
1960 spin_lock_bh(&local->lock); 1963 spin_lock_bh(&local->lock);
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index f63909e4bc32..ee542ec6d6a8 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -293,15 +293,12 @@ static int sandisk_enable_wireless(struct net_device *dev)
293 goto done; 293 goto done;
294 } 294 }
295 295
296 tuple.DesiredTuple = CISTPL_MANFID;
297 tuple.Attributes = TUPLE_RETURN_COMMON; 296 tuple.Attributes = TUPLE_RETURN_COMMON;
298 tuple.TupleData = buf; 297 tuple.TupleData = buf;
299 tuple.TupleDataMax = sizeof(buf); 298 tuple.TupleDataMax = sizeof(buf);
300 tuple.TupleOffset = 0; 299 tuple.TupleOffset = 0;
301 if (pcmcia_get_first_tuple(hw_priv->link, &tuple) || 300
302 pcmcia_get_tuple_data(hw_priv->link, &tuple) || 301 if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) {
303 pcmcia_parse_tuple(hw_priv->link, &tuple, parse) ||
304 parse->manfid.manf != 0xd601 || parse->manfid.card != 0x0101) {
305 /* No SanDisk manfid found */ 302 /* No SanDisk manfid found */
306 ret = -ENODEV; 303 ret = -ENODEV;
307 goto done; 304 goto done;
@@ -573,16 +570,10 @@ static int prism2_config(struct pcmcia_device *link)
573 } 570 }
574 memset(hw_priv, 0, sizeof(*hw_priv)); 571 memset(hw_priv, 0, sizeof(*hw_priv));
575 572
576 tuple.DesiredTuple = CISTPL_CONFIG;
577 tuple.Attributes = 0; 573 tuple.Attributes = 0;
578 tuple.TupleData = buf; 574 tuple.TupleData = buf;
579 tuple.TupleDataMax = sizeof(buf); 575 tuple.TupleDataMax = sizeof(buf);
580 tuple.TupleOffset = 0; 576 tuple.TupleOffset = 0;
581 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
582 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
583 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
584 link->conf.ConfigBase = parse->config.base;
585 link->conf.Present = parse->config.rmask[0];
586 577
587 CS_CHECK(GetConfigurationInfo, 578 CS_CHECK(GetConfigurationInfo,
588 pcmcia_get_configuration_info(link, &conf)); 579 pcmcia_get_configuration_info(link, &conf));
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ed00ebb6e7f4..c19e68636a1c 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1645,9 +1645,9 @@ static void prism2_schedule_reset(local_info_t *local)
1645 1645
1646/* Called only as scheduled task after noticing card timeout in interrupt 1646/* Called only as scheduled task after noticing card timeout in interrupt
1647 * context */ 1647 * context */
1648static void handle_reset_queue(void *data) 1648static void handle_reset_queue(struct work_struct *work)
1649{ 1649{
1650 local_info_t *local = (local_info_t *) data; 1650 local_info_t *local = container_of(work, local_info_t, reset_queue);
1651 1651
1652 printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); 1652 printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
1653 prism2_hw_reset(local->dev); 1653 prism2_hw_reset(local->dev);
@@ -2896,9 +2896,10 @@ static void hostap_passive_scan(unsigned long data)
2896 2896
2897/* Called only as a scheduled task when communications quality values should 2897/* Called only as a scheduled task when communications quality values should
2898 * be updated. */ 2898 * be updated. */
2899static void handle_comms_qual_update(void *data) 2899static void handle_comms_qual_update(struct work_struct *work)
2900{ 2900{
2901 local_info_t *local = data; 2901 local_info_t *local =
2902 container_of(work, local_info_t, comms_qual_update);
2902 prism2_update_comms_qual(local->dev); 2903 prism2_update_comms_qual(local->dev);
2903} 2904}
2904 2905
@@ -3050,9 +3051,9 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set)
3050} 3051}
3051 3052
3052 3053
3053static void handle_set_tim_queue(void *data) 3054static void handle_set_tim_queue(struct work_struct *work)
3054{ 3055{
3055 local_info_t *local = (local_info_t *) data; 3056 local_info_t *local = container_of(work, local_info_t, set_tim_queue);
3056 struct set_tim_data *entry; 3057 struct set_tim_data *entry;
3057 u16 val; 3058 u16 val;
3058 3059
@@ -3209,15 +3210,15 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
3209 local->scan_channel_mask = 0xffff; 3210 local->scan_channel_mask = 0xffff;
3210 3211
3211 /* Initialize task queue structures */ 3212 /* Initialize task queue structures */
3212 INIT_WORK(&local->reset_queue, handle_reset_queue, local); 3213 INIT_WORK(&local->reset_queue, handle_reset_queue);
3213 INIT_WORK(&local->set_multicast_list_queue, 3214 INIT_WORK(&local->set_multicast_list_queue,
3214 hostap_set_multicast_list_queue, local->dev); 3215 hostap_set_multicast_list_queue);
3215 3216
3216 INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local); 3217 INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
3217 INIT_LIST_HEAD(&local->set_tim_list); 3218 INIT_LIST_HEAD(&local->set_tim_list);
3218 spin_lock_init(&local->set_tim_lock); 3219 spin_lock_init(&local->set_tim_lock);
3219 3220
3220 INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local); 3221 INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
3221 3222
3222 /* Initialize tasklets for handling hardware IRQ related operations 3223 /* Initialize tasklets for handling hardware IRQ related operations
3223 * outside hw IRQ handler */ 3224 * outside hw IRQ handler */
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 50f72d831cf4..5fd2b1ad7f5e 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -474,9 +474,9 @@ static void handle_info_queue_scanresults(local_info_t *local)
474 474
475/* Called only as scheduled task after receiving info frames (used to avoid 475/* Called only as scheduled task after receiving info frames (used to avoid
476 * pending too much time in HW IRQ handler). */ 476 * pending too much time in HW IRQ handler). */
477static void handle_info_queue(void *data) 477static void handle_info_queue(struct work_struct *work)
478{ 478{
479 local_info_t *local = (local_info_t *) data; 479 local_info_t *local = container_of(work, local_info_t, info_queue);
480 480
481 if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, 481 if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
482 &local->pending_info)) 482 &local->pending_info))
@@ -493,7 +493,7 @@ void hostap_info_init(local_info_t *local)
493{ 493{
494 skb_queue_head_init(&local->info_list); 494 skb_queue_head_init(&local->info_list);
495#ifndef PRISM2_NO_STATION_MODES 495#ifndef PRISM2_NO_STATION_MODES
496 INIT_WORK(&local->info_queue, handle_info_queue, local); 496 INIT_WORK(&local->info_queue, handle_info_queue);
497#endif /* PRISM2_NO_STATION_MODES */ 497#endif /* PRISM2_NO_STATION_MODES */
498} 498}
499 499
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 53374fcba77e..0796be9d9e77 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -767,14 +767,14 @@ static int prism2_set_mac_address(struct net_device *dev, void *p)
767 767
768/* TODO: to be further implemented as soon as Prism2 fully supports 768/* TODO: to be further implemented as soon as Prism2 fully supports
769 * GroupAddresses and correct documentation is available */ 769 * GroupAddresses and correct documentation is available */
770void hostap_set_multicast_list_queue(void *data) 770void hostap_set_multicast_list_queue(struct work_struct *work)
771{ 771{
772 struct net_device *dev = (struct net_device *) data; 772 local_info_t *local =
773 container_of(work, local_info_t, set_multicast_list_queue);
774 struct net_device *dev = local->dev;
773 struct hostap_interface *iface; 775 struct hostap_interface *iface;
774 local_info_t *local;
775 776
776 iface = netdev_priv(dev); 777 iface = netdev_priv(dev);
777 local = iface->local;
778 if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, 778 if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
779 local->is_promisc)) { 779 local->is_promisc)) {
780 printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", 780 printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 79607b8b877c..1bcd352a813b 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -316,7 +316,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv,
316 struct ipw2100_fw *fw); 316 struct ipw2100_fw *fw);
317static int ipw2100_ucode_download(struct ipw2100_priv *priv, 317static int ipw2100_ucode_download(struct ipw2100_priv *priv,
318 struct ipw2100_fw *fw); 318 struct ipw2100_fw *fw);
319static void ipw2100_wx_event_work(struct ipw2100_priv *priv); 319static void ipw2100_wx_event_work(struct work_struct *work);
320static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); 320static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
321static struct iw_handler_def ipw2100_wx_handler_def; 321static struct iw_handler_def ipw2100_wx_handler_def;
322 322
@@ -679,7 +679,8 @@ static void schedule_reset(struct ipw2100_priv *priv)
679 queue_delayed_work(priv->workqueue, &priv->reset_work, 679 queue_delayed_work(priv->workqueue, &priv->reset_work,
680 priv->reset_backoff * HZ); 680 priv->reset_backoff * HZ);
681 else 681 else
682 queue_work(priv->workqueue, &priv->reset_work); 682 queue_delayed_work(priv->workqueue, &priv->reset_work,
683 0);
683 684
684 if (priv->reset_backoff < MAX_RESET_BACKOFF) 685 if (priv->reset_backoff < MAX_RESET_BACKOFF)
685 priv->reset_backoff++; 686 priv->reset_backoff++;
@@ -1873,8 +1874,10 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1873 netif_stop_queue(priv->net_dev); 1874 netif_stop_queue(priv->net_dev);
1874} 1875}
1875 1876
1876static void ipw2100_reset_adapter(struct ipw2100_priv *priv) 1877static void ipw2100_reset_adapter(struct work_struct *work)
1877{ 1878{
1879 struct ipw2100_priv *priv =
1880 container_of(work, struct ipw2100_priv, reset_work.work);
1878 unsigned long flags; 1881 unsigned long flags;
1879 union iwreq_data wrqu = { 1882 union iwreq_data wrqu = {
1880 .ap_addr = { 1883 .ap_addr = {
@@ -2071,9 +2074,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2071 return; 2074 return;
2072 2075
2073 if (priv->status & STATUS_SECURITY_UPDATED) 2076 if (priv->status & STATUS_SECURITY_UPDATED)
2074 queue_work(priv->workqueue, &priv->security_work); 2077 queue_delayed_work(priv->workqueue, &priv->security_work, 0);
2075 2078
2076 queue_work(priv->workqueue, &priv->wx_event_work); 2079 queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
2077} 2080}
2078 2081
2079static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) 2082static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -5524,8 +5527,11 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode)
5524 return err; 5527 return err;
5525} 5528}
5526 5529
5527static void ipw2100_security_work(struct ipw2100_priv *priv) 5530static void ipw2100_security_work(struct work_struct *work)
5528{ 5531{
5532 struct ipw2100_priv *priv =
5533 container_of(work, struct ipw2100_priv, security_work.work);
5534
5529 /* If we happen to have reconnected before we get a chance to 5535 /* If we happen to have reconnected before we get a chance to
5530 * process this, then update the security settings--which causes 5536 * process this, then update the security settings--which causes
5531 * a disassociation to occur */ 5537 * a disassociation to occur */
@@ -5748,7 +5754,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p)
5748 5754
5749 priv->reset_backoff = 0; 5755 priv->reset_backoff = 0;
5750 mutex_unlock(&priv->action_mutex); 5756 mutex_unlock(&priv->action_mutex);
5751 ipw2100_reset_adapter(priv); 5757 ipw2100_reset_adapter(&priv->reset_work.work);
5752 return 0; 5758 return 0;
5753 5759
5754 done: 5760 done:
@@ -5910,9 +5916,10 @@ static const struct ethtool_ops ipw2100_ethtool_ops = {
5910 .get_drvinfo = ipw_ethtool_get_drvinfo, 5916 .get_drvinfo = ipw_ethtool_get_drvinfo,
5911}; 5917};
5912 5918
5913static void ipw2100_hang_check(void *adapter) 5919static void ipw2100_hang_check(struct work_struct *work)
5914{ 5920{
5915 struct ipw2100_priv *priv = adapter; 5921 struct ipw2100_priv *priv =
5922 container_of(work, struct ipw2100_priv, hang_check.work);
5916 unsigned long flags; 5923 unsigned long flags;
5917 u32 rtc = 0xa5a5a5a5; 5924 u32 rtc = 0xa5a5a5a5;
5918 u32 len = sizeof(rtc); 5925 u32 len = sizeof(rtc);
@@ -5952,9 +5959,10 @@ static void ipw2100_hang_check(void *adapter)
5952 spin_unlock_irqrestore(&priv->low_lock, flags); 5959 spin_unlock_irqrestore(&priv->low_lock, flags);
5953} 5960}
5954 5961
5955static void ipw2100_rf_kill(void *adapter) 5962static void ipw2100_rf_kill(struct work_struct *work)
5956{ 5963{
5957 struct ipw2100_priv *priv = adapter; 5964 struct ipw2100_priv *priv =
5965 container_of(work, struct ipw2100_priv, rf_kill.work);
5958 unsigned long flags; 5966 unsigned long flags;
5959 5967
5960 spin_lock_irqsave(&priv->low_lock, flags); 5968 spin_lock_irqsave(&priv->low_lock, flags);
@@ -6103,14 +6111,11 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6103 6111
6104 priv->workqueue = create_workqueue(DRV_NAME); 6112 priv->workqueue = create_workqueue(DRV_NAME);
6105 6113
6106 INIT_WORK(&priv->reset_work, 6114 INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
6107 (void (*)(void *))ipw2100_reset_adapter, priv); 6115 INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
6108 INIT_WORK(&priv->security_work, 6116 INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
6109 (void (*)(void *))ipw2100_security_work, priv); 6117 INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
6110 INIT_WORK(&priv->wx_event_work, 6118 INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
6111 (void (*)(void *))ipw2100_wx_event_work, priv);
6112 INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
6113 INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
6114 6119
6115 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 6120 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6116 ipw2100_irq_tasklet, (unsigned long)priv); 6121 ipw2100_irq_tasklet, (unsigned long)priv);
@@ -8281,8 +8286,10 @@ static struct iw_handler_def ipw2100_wx_handler_def = {
8281 .get_wireless_stats = ipw2100_wx_wireless_stats, 8286 .get_wireless_stats = ipw2100_wx_wireless_stats,
8282}; 8287};
8283 8288
8284static void ipw2100_wx_event_work(struct ipw2100_priv *priv) 8289static void ipw2100_wx_event_work(struct work_struct *work)
8285{ 8290{
8291 struct ipw2100_priv *priv =
8292 container_of(work, struct ipw2100_priv, wx_event_work.work);
8286 union iwreq_data wrqu; 8293 union iwreq_data wrqu;
8287 int len = ETH_ALEN; 8294 int len = ETH_ALEN;
8288 8295
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index 55b7227198df..de7d384d38af 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -583,11 +583,11 @@ struct ipw2100_priv {
583 struct tasklet_struct irq_tasklet; 583 struct tasklet_struct irq_tasklet;
584 584
585 struct workqueue_struct *workqueue; 585 struct workqueue_struct *workqueue;
586 struct work_struct reset_work; 586 struct delayed_work reset_work;
587 struct work_struct security_work; 587 struct delayed_work security_work;
588 struct work_struct wx_event_work; 588 struct delayed_work wx_event_work;
589 struct work_struct hang_check; 589 struct delayed_work hang_check;
590 struct work_struct rf_kill; 590 struct delayed_work rf_kill;
591 591
592 u32 interrupts; 592 u32 interrupts;
593 int tx_interrupts; 593 int tx_interrupts;
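
Illustrative aside (not part of the patch): ipw2100 sometimes runs a handler synchronously, so after the conversion such callers must pass the embedded work_struct for the handler's container_of() to resolve, which is why ipw2100_set_address() above now calls ipw2100_reset_adapter(&priv->reset_work.work). A sketch, all names hypothetical:

    #include <linux/workqueue.h>

    struct my_priv {
            struct delayed_work reset_work;
    };

    static void my_reset_adapter(struct work_struct *work)
    {
            struct my_priv *priv =
                    container_of(work, struct my_priv, reset_work.work);

            /* ... reset the hardware owned by priv ... */
    }

    /* Synchronous caller: pass the embedded work_struct, not priv itself. */
    static void my_set_address(struct my_priv *priv)
    {
            my_reset_adapter(&priv->reset_work.work);
    }
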
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index c692d01a76ca..e82e56bb85e1 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -187,9 +187,9 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); 187static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188static void ipw_rx_queue_replenish(void *); 188static void ipw_rx_queue_replenish(void *);
189static int ipw_up(struct ipw_priv *); 189static int ipw_up(struct ipw_priv *);
190static void ipw_bg_up(void *); 190static void ipw_bg_up(struct work_struct *work);
191static void ipw_down(struct ipw_priv *); 191static void ipw_down(struct ipw_priv *);
192static void ipw_bg_down(void *); 192static void ipw_bg_down(struct work_struct *work);
193static int ipw_config(struct ipw_priv *); 193static int ipw_config(struct ipw_priv *);
194static int init_supported_rates(struct ipw_priv *priv, 194static int init_supported_rates(struct ipw_priv *priv,
195 struct ipw_supported_rates *prates); 195 struct ipw_supported_rates *prates);
@@ -862,11 +862,12 @@ static void ipw_led_link_on(struct ipw_priv *priv)
862 spin_unlock_irqrestore(&priv->lock, flags); 862 spin_unlock_irqrestore(&priv->lock, flags);
863} 863}
864 864
865static void ipw_bg_led_link_on(void *data) 865static void ipw_bg_led_link_on(struct work_struct *work)
866{ 866{
867 struct ipw_priv *priv = data; 867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
868 mutex_lock(&priv->mutex); 869 mutex_lock(&priv->mutex);
869 ipw_led_link_on(data); 870 ipw_led_link_on(priv);
870 mutex_unlock(&priv->mutex); 871 mutex_unlock(&priv->mutex);
871} 872}
872 873
@@ -906,11 +907,12 @@ static void ipw_led_link_off(struct ipw_priv *priv)
906 spin_unlock_irqrestore(&priv->lock, flags); 907 spin_unlock_irqrestore(&priv->lock, flags);
907} 908}
908 909
909static void ipw_bg_led_link_off(void *data) 910static void ipw_bg_led_link_off(struct work_struct *work)
910{ 911{
911 struct ipw_priv *priv = data; 912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
912 mutex_lock(&priv->mutex); 914 mutex_lock(&priv->mutex);
913 ipw_led_link_off(data); 915 ipw_led_link_off(priv);
914 mutex_unlock(&priv->mutex); 916 mutex_unlock(&priv->mutex);
915} 917}
916 918
@@ -985,11 +987,12 @@ static void ipw_led_activity_off(struct ipw_priv *priv)
985 spin_unlock_irqrestore(&priv->lock, flags); 987 spin_unlock_irqrestore(&priv->lock, flags);
986} 988}
987 989
988static void ipw_bg_led_activity_off(void *data) 990static void ipw_bg_led_activity_off(struct work_struct *work)
989{ 991{
990 struct ipw_priv *priv = data; 992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
991 mutex_lock(&priv->mutex); 994 mutex_lock(&priv->mutex);
992 ipw_led_activity_off(data); 995 ipw_led_activity_off(priv);
993 mutex_unlock(&priv->mutex); 996 mutex_unlock(&priv->mutex);
994} 997}
995 998
@@ -2228,11 +2231,12 @@ static void ipw_adapter_restart(void *adapter)
2228 } 2231 }
2229} 2232}
2230 2233
2231static void ipw_bg_adapter_restart(void *data) 2234static void ipw_bg_adapter_restart(struct work_struct *work)
2232{ 2235{
2233 struct ipw_priv *priv = data; 2236 struct ipw_priv *priv =
2237 container_of(work, struct ipw_priv, adapter_restart);
2234 mutex_lock(&priv->mutex); 2238 mutex_lock(&priv->mutex);
2235 ipw_adapter_restart(data); 2239 ipw_adapter_restart(priv);
2236 mutex_unlock(&priv->mutex); 2240 mutex_unlock(&priv->mutex);
2237} 2241}
2238 2242
@@ -2249,11 +2253,12 @@ static void ipw_scan_check(void *data)
2249 } 2253 }
2250} 2254}
2251 2255
2252static void ipw_bg_scan_check(void *data) 2256static void ipw_bg_scan_check(struct work_struct *work)
2253{ 2257{
2254 struct ipw_priv *priv = data; 2258 struct ipw_priv *priv =
2259 container_of(work, struct ipw_priv, scan_check.work);
2255 mutex_lock(&priv->mutex); 2260 mutex_lock(&priv->mutex);
2256 ipw_scan_check(data); 2261 ipw_scan_check(priv);
2257 mutex_unlock(&priv->mutex); 2262 mutex_unlock(&priv->mutex);
2258} 2263}
2259 2264
@@ -3831,17 +3836,19 @@ static int ipw_disassociate(void *data)
3831 return 1; 3836 return 1;
3832} 3837}
3833 3838
3834static void ipw_bg_disassociate(void *data) 3839static void ipw_bg_disassociate(struct work_struct *work)
3835{ 3840{
3836 struct ipw_priv *priv = data; 3841 struct ipw_priv *priv =
3842 container_of(work, struct ipw_priv, disassociate);
3837 mutex_lock(&priv->mutex); 3843 mutex_lock(&priv->mutex);
3838 ipw_disassociate(data); 3844 ipw_disassociate(priv);
3839 mutex_unlock(&priv->mutex); 3845 mutex_unlock(&priv->mutex);
3840} 3846}
3841 3847
3842static void ipw_system_config(void *data) 3848static void ipw_system_config(struct work_struct *work)
3843{ 3849{
3844 struct ipw_priv *priv = data; 3850 struct ipw_priv *priv =
3851 container_of(work, struct ipw_priv, system_config);
3845 3852
3846#ifdef CONFIG_IPW2200_PROMISCUOUS 3853#ifdef CONFIG_IPW2200_PROMISCUOUS
3847 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 3854 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
@@ -4208,11 +4215,12 @@ static void ipw_gather_stats(struct ipw_priv *priv)
4208 IPW_STATS_INTERVAL); 4215 IPW_STATS_INTERVAL);
4209} 4216}
4210 4217
4211static void ipw_bg_gather_stats(void *data) 4218static void ipw_bg_gather_stats(struct work_struct *work)
4212{ 4219{
4213 struct ipw_priv *priv = data; 4220 struct ipw_priv *priv =
4221 container_of(work, struct ipw_priv, gather_stats.work);
4214 mutex_lock(&priv->mutex); 4222 mutex_lock(&priv->mutex);
4215 ipw_gather_stats(data); 4223 ipw_gather_stats(priv);
4216 mutex_unlock(&priv->mutex); 4224 mutex_unlock(&priv->mutex);
4217} 4225}
4218 4226
@@ -4268,8 +4276,8 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4268 if (!(priv->status & STATUS_ROAMING)) { 4276 if (!(priv->status & STATUS_ROAMING)) {
4269 priv->status |= STATUS_ROAMING; 4277 priv->status |= STATUS_ROAMING;
4270 if (!(priv->status & STATUS_SCANNING)) 4278 if (!(priv->status & STATUS_SCANNING))
4271 queue_work(priv->workqueue, 4279 queue_delayed_work(priv->workqueue,
4272 &priv->request_scan); 4280 &priv->request_scan, 0);
4273 } 4281 }
4274 return; 4282 return;
4275 } 4283 }
@@ -4607,8 +4615,8 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4607#ifdef CONFIG_IPW2200_MONITOR 4615#ifdef CONFIG_IPW2200_MONITOR
4608 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4616 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4609 priv->status |= STATUS_SCAN_FORCED; 4617 priv->status |= STATUS_SCAN_FORCED;
4610 queue_work(priv->workqueue, 4618 queue_delayed_work(priv->workqueue,
4611 &priv->request_scan); 4619 &priv->request_scan, 0);
4612 break; 4620 break;
4613 } 4621 }
4614 priv->status &= ~STATUS_SCAN_FORCED; 4622 priv->status &= ~STATUS_SCAN_FORCED;
@@ -4631,8 +4639,8 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4631 /* Don't schedule if we aborted the scan */ 4639 /* Don't schedule if we aborted the scan */
4632 priv->status &= ~STATUS_ROAMING; 4640 priv->status &= ~STATUS_ROAMING;
4633 } else if (priv->status & STATUS_SCAN_PENDING) 4641 } else if (priv->status & STATUS_SCAN_PENDING)
4634 queue_work(priv->workqueue, 4642 queue_delayed_work(priv->workqueue,
4635 &priv->request_scan); 4643 &priv->request_scan, 0);
4636 else if (priv->config & CFG_BACKGROUND_SCAN 4644 else if (priv->config & CFG_BACKGROUND_SCAN
4637 && priv->status & STATUS_ASSOCIATED) 4645 && priv->status & STATUS_ASSOCIATED)
4638 queue_delayed_work(priv->workqueue, 4646 queue_delayed_work(priv->workqueue,
@@ -5055,11 +5063,12 @@ static void ipw_rx_queue_replenish(void *data)
5055 ipw_rx_queue_restock(priv); 5063 ipw_rx_queue_restock(priv);
5056} 5064}
5057 5065
5058static void ipw_bg_rx_queue_replenish(void *data) 5066static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5059{ 5067{
5060 struct ipw_priv *priv = data; 5068 struct ipw_priv *priv =
5069 container_of(work, struct ipw_priv, rx_replenish);
5061 mutex_lock(&priv->mutex); 5070 mutex_lock(&priv->mutex);
5062 ipw_rx_queue_replenish(data); 5071 ipw_rx_queue_replenish(priv);
5063 mutex_unlock(&priv->mutex); 5072 mutex_unlock(&priv->mutex);
5064} 5073}
5065 5074
@@ -5489,9 +5498,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5489 return 1; 5498 return 1;
5490} 5499}
5491 5500
5492static void ipw_merge_adhoc_network(void *data) 5501static void ipw_merge_adhoc_network(struct work_struct *work)
5493{ 5502{
5494 struct ipw_priv *priv = data; 5503 struct ipw_priv *priv =
5504 container_of(work, struct ipw_priv, merge_networks);
5495 struct ieee80211_network *network = NULL; 5505 struct ieee80211_network *network = NULL;
5496 struct ipw_network_match match = { 5506 struct ipw_network_match match = {
5497 .network = priv->assoc_network 5507 .network = priv->assoc_network
@@ -5948,11 +5958,12 @@ static void ipw_adhoc_check(void *data)
5948 priv->assoc_request.beacon_interval); 5958 priv->assoc_request.beacon_interval);
5949} 5959}
5950 5960
5951static void ipw_bg_adhoc_check(void *data) 5961static void ipw_bg_adhoc_check(struct work_struct *work)
5952{ 5962{
5953 struct ipw_priv *priv = data; 5963 struct ipw_priv *priv =
5964 container_of(work, struct ipw_priv, adhoc_check.work);
5954 mutex_lock(&priv->mutex); 5965 mutex_lock(&priv->mutex);
5955 ipw_adhoc_check(data); 5966 ipw_adhoc_check(priv);
5956 mutex_unlock(&priv->mutex); 5967 mutex_unlock(&priv->mutex);
5957} 5968}
5958 5969
@@ -6299,19 +6310,26 @@ done:
6299 return err; 6310 return err;
6300} 6311}
6301 6312
6302static int ipw_request_passive_scan(struct ipw_priv *priv) { 6313static void ipw_request_passive_scan(struct work_struct *work)
6303 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); 6314{
6315 struct ipw_priv *priv =
6316 container_of(work, struct ipw_priv, request_passive_scan);
6317 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6304} 6318}
6305 6319
6306static int ipw_request_scan(struct ipw_priv *priv) { 6320static void ipw_request_scan(struct work_struct *work)
6307 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); 6321{
6322 struct ipw_priv *priv =
6323 container_of(work, struct ipw_priv, request_scan.work);
6324 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6308} 6325}
6309 6326
6310static void ipw_bg_abort_scan(void *data) 6327static void ipw_bg_abort_scan(struct work_struct *work)
6311{ 6328{
6312 struct ipw_priv *priv = data; 6329 struct ipw_priv *priv =
6330 container_of(work, struct ipw_priv, abort_scan);
6313 mutex_lock(&priv->mutex); 6331 mutex_lock(&priv->mutex);
6314 ipw_abort_scan(data); 6332 ipw_abort_scan(priv);
6315 mutex_unlock(&priv->mutex); 6333 mutex_unlock(&priv->mutex);
6316} 6334}
6317 6335
@@ -7084,9 +7102,10 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7084/* 7102/*
7085* background support to run QoS activate functionality 7103* background support to run QoS activate functionality
7086*/ 7104*/
7087static void ipw_bg_qos_activate(void *data) 7105static void ipw_bg_qos_activate(struct work_struct *work)
7088{ 7106{
7089 struct ipw_priv *priv = data; 7107 struct ipw_priv *priv =
7108 container_of(work, struct ipw_priv, qos_activate);
7090 7109
7091 if (priv == NULL) 7110 if (priv == NULL)
7092 return; 7111 return;
@@ -7394,11 +7413,12 @@ static void ipw_roam(void *data)
7394 priv->status &= ~STATUS_ROAMING; 7413 priv->status &= ~STATUS_ROAMING;
7395} 7414}
7396 7415
7397static void ipw_bg_roam(void *data) 7416static void ipw_bg_roam(struct work_struct *work)
7398{ 7417{
7399 struct ipw_priv *priv = data; 7418 struct ipw_priv *priv =
7419 container_of(work, struct ipw_priv, roam);
7400 mutex_lock(&priv->mutex); 7420 mutex_lock(&priv->mutex);
7401 ipw_roam(data); 7421 ipw_roam(priv);
7402 mutex_unlock(&priv->mutex); 7422 mutex_unlock(&priv->mutex);
7403} 7423}
7404 7424
@@ -7479,8 +7499,8 @@ static int ipw_associate(void *data)
7479 &priv->request_scan, 7499 &priv->request_scan,
7480 SCAN_INTERVAL); 7500 SCAN_INTERVAL);
7481 else 7501 else
7482 queue_work(priv->workqueue, 7502 queue_delayed_work(priv->workqueue,
7483 &priv->request_scan); 7503 &priv->request_scan, 0);
7484 } 7504 }
7485 7505
7486 return 0; 7506 return 0;
@@ -7491,11 +7511,12 @@ static int ipw_associate(void *data)
7491 return 1; 7511 return 1;
7492} 7512}
7493 7513
7494static void ipw_bg_associate(void *data) 7514static void ipw_bg_associate(struct work_struct *work)
7495{ 7515{
7496 struct ipw_priv *priv = data; 7516 struct ipw_priv *priv =
7517 container_of(work, struct ipw_priv, associate);
7497 mutex_lock(&priv->mutex); 7518 mutex_lock(&priv->mutex);
7498 ipw_associate(data); 7519 ipw_associate(priv);
7499 mutex_unlock(&priv->mutex); 7520 mutex_unlock(&priv->mutex);
7500} 7521}
7501 7522
@@ -9410,7 +9431,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
9410 9431
9411 IPW_DEBUG_WX("Start scan\n"); 9432 IPW_DEBUG_WX("Start scan\n");
9412 9433
9413 queue_work(priv->workqueue, &priv->request_scan); 9434 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
9414 9435
9415 return 0; 9436 return 0;
9416} 9437}
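Once request_scan becomes a struct delayed_work it can no longer be handed to queue_work(), so call sites that want it to run immediately queue it with a delay of 0 jiffies, as the hunks above do. A small sketch with hypothetical demo_* names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct workqueue_struct *workqueue;
	struct delayed_work request_scan;
};

static void demo_kick_scan(struct demo_priv *priv, int wait_a_second)
{
	if (wait_a_second)
		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
	else
		/* old: queue_work(priv->workqueue, &priv->request_scan); */
		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
}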
@@ -10547,11 +10568,12 @@ static void ipw_rf_kill(void *adapter)
10547 spin_unlock_irqrestore(&priv->lock, flags); 10568 spin_unlock_irqrestore(&priv->lock, flags);
10548} 10569}
10549 10570
10550static void ipw_bg_rf_kill(void *data) 10571static void ipw_bg_rf_kill(struct work_struct *work)
10551{ 10572{
10552 struct ipw_priv *priv = data; 10573 struct ipw_priv *priv =
10574 container_of(work, struct ipw_priv, rf_kill.work);
10553 mutex_lock(&priv->mutex); 10575 mutex_lock(&priv->mutex);
10554 ipw_rf_kill(data); 10576 ipw_rf_kill(priv);
10555 mutex_unlock(&priv->mutex); 10577 mutex_unlock(&priv->mutex);
10556} 10578}
10557 10579
@@ -10582,11 +10604,12 @@ static void ipw_link_up(struct ipw_priv *priv)
10582 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); 10604 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10583} 10605}
10584 10606
10585static void ipw_bg_link_up(void *data) 10607static void ipw_bg_link_up(struct work_struct *work)
10586{ 10608{
10587 struct ipw_priv *priv = data; 10609 struct ipw_priv *priv =
10610 container_of(work, struct ipw_priv, link_up);
10588 mutex_lock(&priv->mutex); 10611 mutex_lock(&priv->mutex);
10589 ipw_link_up(data); 10612 ipw_link_up(priv);
10590 mutex_unlock(&priv->mutex); 10613 mutex_unlock(&priv->mutex);
10591} 10614}
10592 10615
@@ -10606,15 +10629,16 @@ static void ipw_link_down(struct ipw_priv *priv)
10606 10629
10607 if (!(priv->status & STATUS_EXIT_PENDING)) { 10630 if (!(priv->status & STATUS_EXIT_PENDING)) {
10608 /* Queue up another scan... */ 10631 /* Queue up another scan... */
10609 queue_work(priv->workqueue, &priv->request_scan); 10632 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10610 } 10633 }
10611} 10634}
10612 10635
10613static void ipw_bg_link_down(void *data) 10636static void ipw_bg_link_down(struct work_struct *work)
10614{ 10637{
10615 struct ipw_priv *priv = data; 10638 struct ipw_priv *priv =
10639 container_of(work, struct ipw_priv, link_down);
10616 mutex_lock(&priv->mutex); 10640 mutex_lock(&priv->mutex);
10617 ipw_link_down(data); 10641 ipw_link_down(priv);
10618 mutex_unlock(&priv->mutex); 10642 mutex_unlock(&priv->mutex);
10619} 10643}
10620 10644
@@ -10626,38 +10650,30 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
10626 init_waitqueue_head(&priv->wait_command_queue); 10650 init_waitqueue_head(&priv->wait_command_queue);
10627 init_waitqueue_head(&priv->wait_state); 10651 init_waitqueue_head(&priv->wait_state);
10628 10652
10629 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv); 10653 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10630 INIT_WORK(&priv->associate, ipw_bg_associate, priv); 10654 INIT_WORK(&priv->associate, ipw_bg_associate);
10631 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv); 10655 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10632 INIT_WORK(&priv->system_config, ipw_system_config, priv); 10656 INIT_WORK(&priv->system_config, ipw_system_config);
10633 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv); 10657 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10634 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv); 10658 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10635 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv); 10659 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10636 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv); 10660 INIT_WORK(&priv->up, ipw_bg_up);
10637 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); 10661 INIT_WORK(&priv->down, ipw_bg_down);
10638 INIT_WORK(&priv->request_scan, 10662 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10639 (void (*)(void *))ipw_request_scan, priv); 10663 INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10640 INIT_WORK(&priv->request_passive_scan, 10664 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10641 (void (*)(void *))ipw_request_passive_scan, priv); 10665 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10642 INIT_WORK(&priv->gather_stats, 10666 INIT_WORK(&priv->roam, ipw_bg_roam);
10643 (void (*)(void *))ipw_bg_gather_stats, priv); 10667 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10644 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); 10668 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10645 INIT_WORK(&priv->roam, ipw_bg_roam, priv); 10669 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10646 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv); 10670 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10647 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv); 10671 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10648 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv); 10672 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10649 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on, 10673 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10650 priv);
10651 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10652 priv);
10653 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10654 priv);
10655 INIT_WORK(&priv->merge_networks,
10656 (void (*)(void *))ipw_merge_adhoc_network, priv);
10657 10674
10658#ifdef CONFIG_IPW2200_QOS 10675#ifdef CONFIG_IPW2200_QOS
10659 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate, 10676 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10660 priv);
10661#endif /* CONFIG_IPW2200_QOS */ 10677#endif /* CONFIG_IPW2200_QOS */
10662 10678
10663 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 10679 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
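On the initialiser side the data argument and all of the (void (*)(void *)) casts disappear, because the handler prototype is now fixed as void (*)(struct work_struct *); items that are ever queued with a delay switch to INIT_DELAYED_WORK(). A sketch with hypothetical demo_* names, showing only the setup side:

#include <linux/workqueue.h>

struct demo_setup {
	struct work_struct associate;
	struct delayed_work rf_kill;
};

static void demo_associate_fn(struct work_struct *work) { /* ... */ }
static void demo_rf_kill_fn(struct work_struct *work) { /* ... */ }

static void demo_setup_deferred_work(struct demo_setup *priv)
{
	/* old: INIT_WORK(&priv->associate, demo_associate_fn, priv); */
	INIT_WORK(&priv->associate, demo_associate_fn);
	/* old: INIT_WORK(&priv->rf_kill,
	 *                (void (*)(void *))demo_rf_kill_fn, priv);   */
	INIT_DELAYED_WORK(&priv->rf_kill, demo_rf_kill_fn);
}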
@@ -11190,7 +11206,8 @@ static int ipw_up(struct ipw_priv *priv)
11190 11206
11191 /* If configure to try and auto-associate, kick 11207 /* If configure to try and auto-associate, kick
11192 * off a scan. */ 11208 * off a scan. */
11193 queue_work(priv->workqueue, &priv->request_scan); 11209 queue_delayed_work(priv->workqueue,
11210 &priv->request_scan, 0);
11194 11211
11195 return 0; 11212 return 0;
11196 } 11213 }
@@ -11211,11 +11228,12 @@ static int ipw_up(struct ipw_priv *priv)
11211 return -EIO; 11228 return -EIO;
11212} 11229}
11213 11230
11214static void ipw_bg_up(void *data) 11231static void ipw_bg_up(struct work_struct *work)
11215{ 11232{
11216 struct ipw_priv *priv = data; 11233 struct ipw_priv *priv =
11234 container_of(work, struct ipw_priv, up);
11217 mutex_lock(&priv->mutex); 11235 mutex_lock(&priv->mutex);
11218 ipw_up(data); 11236 ipw_up(priv);
11219 mutex_unlock(&priv->mutex); 11237 mutex_unlock(&priv->mutex);
11220} 11238}
11221 11239
@@ -11282,11 +11300,12 @@ static void ipw_down(struct ipw_priv *priv)
11282 ipw_led_radio_off(priv); 11300 ipw_led_radio_off(priv);
11283} 11301}
11284 11302
11285static void ipw_bg_down(void *data) 11303static void ipw_bg_down(struct work_struct *work)
11286{ 11304{
11287 struct ipw_priv *priv = data; 11305 struct ipw_priv *priv =
11306 container_of(work, struct ipw_priv, down);
11288 mutex_lock(&priv->mutex); 11307 mutex_lock(&priv->mutex);
11289 ipw_down(data); 11308 ipw_down(priv);
11290 mutex_unlock(&priv->mutex); 11309 mutex_unlock(&priv->mutex);
11291} 11310}
11292 11311
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index dad5eedefbf1..626a240a87d8 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1290,21 +1290,21 @@ struct ipw_priv {
1290 1290
1291 struct workqueue_struct *workqueue; 1291 struct workqueue_struct *workqueue;
1292 1292
1293 struct work_struct adhoc_check; 1293 struct delayed_work adhoc_check;
1294 struct work_struct associate; 1294 struct work_struct associate;
1295 struct work_struct disassociate; 1295 struct work_struct disassociate;
1296 struct work_struct system_config; 1296 struct work_struct system_config;
1297 struct work_struct rx_replenish; 1297 struct work_struct rx_replenish;
1298 struct work_struct request_scan; 1298 struct delayed_work request_scan;
1299 struct work_struct request_passive_scan; 1299 struct work_struct request_passive_scan;
1300 struct work_struct adapter_restart; 1300 struct work_struct adapter_restart;
1301 struct work_struct rf_kill; 1301 struct delayed_work rf_kill;
1302 struct work_struct up; 1302 struct work_struct up;
1303 struct work_struct down; 1303 struct work_struct down;
1304 struct work_struct gather_stats; 1304 struct delayed_work gather_stats;
1305 struct work_struct abort_scan; 1305 struct work_struct abort_scan;
1306 struct work_struct roam; 1306 struct work_struct roam;
1307 struct work_struct scan_check; 1307 struct delayed_work scan_check;
1308 struct work_struct link_up; 1308 struct work_struct link_up;
1309 struct work_struct link_down; 1309 struct work_struct link_down;
1310 1310
@@ -1319,9 +1319,9 @@ struct ipw_priv {
1319 u32 led_ofdm_on; 1319 u32 led_ofdm_on;
1320 u32 led_ofdm_off; 1320 u32 led_ofdm_off;
1321 1321
1322 struct work_struct led_link_on; 1322 struct delayed_work led_link_on;
1323 struct work_struct led_link_off; 1323 struct delayed_work led_link_off;
1324 struct work_struct led_act_off; 1324 struct delayed_work led_act_off;
1325 struct work_struct merge_networks; 1325 struct work_struct merge_networks;
1326 1326
1327 struct ipw_cmd_log *cmdlog; 1327 struct ipw_cmd_log *cmdlog;
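The header changes are mechanical because, in this kernel series, a delayed work item is approximately the old work item plus its timer, which is also why the handlers above spell the container_of() member as foo.work:

/* Paraphrased from <linux/workqueue.h> of this period, not copied verbatim: */
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};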
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 6714e0dfa8d6..644b4741ef74 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -735,10 +735,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
735static int netwave_pcmcia_config(struct pcmcia_device *link) { 735static int netwave_pcmcia_config(struct pcmcia_device *link) {
736 struct net_device *dev = link->priv; 736 struct net_device *dev = link->priv;
737 netwave_private *priv = netdev_priv(dev); 737 netwave_private *priv = netdev_priv(dev);
738 tuple_t tuple;
739 cisparse_t parse;
740 int i, j, last_ret, last_fn; 738 int i, j, last_ret, last_fn;
741 u_char buf[64];
742 win_req_t req; 739 win_req_t req;
743 memreq_t mem; 740 memreq_t mem;
744 u_char __iomem *ramBase = NULL; 741 u_char __iomem *ramBase = NULL;
@@ -746,21 +743,6 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
746 DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link); 743 DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link);
747 744
748 /* 745 /*
749 This reads the card's CONFIG tuple to find its configuration
750 registers.
751 */
752 tuple.Attributes = 0;
753 tuple.TupleData = (cisdata_t *) buf;
754 tuple.TupleDataMax = 64;
755 tuple.TupleOffset = 0;
756 tuple.DesiredTuple = CISTPL_CONFIG;
757 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
758 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
759 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
760 link->conf.ConfigBase = parse.config.base;
761 link->conf.Present = parse.config.rmask[0];
762
763 /*
764 * Try allocating IO ports. This tries a few fixed addresses. 746 * Try allocating IO ports. This tries a few fixed addresses.
765 * If you want, you can also read the card's config table to 747 * If you want, you can also read the card's config table to
766 * pick addresses -- see the serial driver for an example. 748 * pick addresses -- see the serial driver for an example.
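netwave_cs and the other PCMCIA drivers touched below (orinoco_cs, ray_cs, spectrum_cs, wavelan_cs, wl3501_cs, parport_cs) all drop the same block that walked the CIS for CISTPL_CONFIG and filled in link->conf.ConfigBase and link->conf.Present. The deletions only make sense if the PCMCIA core now performs that step before the driver's config routine runs; that is an inference from this patch, not something shown in it. A driver-side sketch under that assumption, with hypothetical demo_* names:

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <linux/errno.h>

static int demo_cs_config(struct pcmcia_device *link)
{
	/* No CISTPL_CONFIG walk here: ConfigBase/Present are expected to be
	 * filled in by the core before this function is entered. */
	if (!link->conf.ConfigBase)
		return -ENODEV;		/* defensive check, illustrative only */

	/* ... request I/O, IRQ and configuration as before ... */
	return 0;
}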
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 336cabac13b3..936c888e03e1 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -980,9 +980,11 @@ static void print_linkstatus(struct net_device *dev, u16 status)
980} 980}
981 981
982/* Search scan results for requested BSSID, join it if found */ 982/* Search scan results for requested BSSID, join it if found */
983static void orinoco_join_ap(struct net_device *dev) 983static void orinoco_join_ap(struct work_struct *work)
984{ 984{
985 struct orinoco_private *priv = netdev_priv(dev); 985 struct orinoco_private *priv =
986 container_of(work, struct orinoco_private, join_work);
987 struct net_device *dev = priv->ndev;
986 struct hermes *hw = &priv->hw; 988 struct hermes *hw = &priv->hw;
987 int err; 989 int err;
988 unsigned long flags; 990 unsigned long flags;
@@ -1055,9 +1057,11 @@ static void orinoco_join_ap(struct net_device *dev)
1055} 1057}
1056 1058
1057/* Send new BSSID to userspace */ 1059/* Send new BSSID to userspace */
1058static void orinoco_send_wevents(struct net_device *dev) 1060static void orinoco_send_wevents(struct work_struct *work)
1059{ 1061{
1060 struct orinoco_private *priv = netdev_priv(dev); 1062 struct orinoco_private *priv =
1063 container_of(work, struct orinoco_private, wevent_work);
1064 struct net_device *dev = priv->ndev;
1061 struct hermes *hw = &priv->hw; 1065 struct hermes *hw = &priv->hw;
1062 union iwreq_data wrqu; 1066 union iwreq_data wrqu;
1063 int err; 1067 int err;
@@ -1864,9 +1868,11 @@ __orinoco_set_multicast_list(struct net_device *dev)
1864 1868
1865/* This must be called from user context, without locks held - use 1869/* This must be called from user context, without locks held - use
1866 * schedule_work() */ 1870 * schedule_work() */
1867static void orinoco_reset(struct net_device *dev) 1871static void orinoco_reset(struct work_struct *work)
1868{ 1872{
1869 struct orinoco_private *priv = netdev_priv(dev); 1873 struct orinoco_private *priv =
1874 container_of(work, struct orinoco_private, reset_work);
1875 struct net_device *dev = priv->ndev;
1870 struct hermes *hw = &priv->hw; 1876 struct hermes *hw = &priv->hw;
1871 int err; 1877 int err;
1872 unsigned long flags; 1878 unsigned long flags;
@@ -2434,9 +2440,9 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2434 priv->hw_unavailable = 1; /* orinoco_init() must clear this 2440 priv->hw_unavailable = 1; /* orinoco_init() must clear this
2435 * before anything else touches the 2441 * before anything else touches the
2436 * hardware */ 2442 * hardware */
2437 INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev); 2443 INIT_WORK(&priv->reset_work, orinoco_reset);
2438 INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev); 2444 INIT_WORK(&priv->join_work, orinoco_join_ap);
2439 INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev); 2445 INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
2440 2446
2441 netif_carrier_off(dev); 2447 netif_carrier_off(dev);
2442 priv->last_linkstatus = 0xffff; 2448 priv->last_linkstatus = 0xffff;
@@ -3608,7 +3614,7 @@ static int orinoco_ioctl_reset(struct net_device *dev,
3608 printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); 3614 printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
3609 3615
3610 /* Firmware reset */ 3616 /* Firmware reset */
3611 orinoco_reset(dev); 3617 orinoco_reset(&priv->reset_work);
3612 } else { 3618 } else {
3613 printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); 3619 printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
3614 3620
@@ -4154,7 +4160,7 @@ static int orinoco_ioctl_commit(struct net_device *dev,
4154 return 0; 4160 return 0;
4155 4161
4156 if (priv->broken_disableport) { 4162 if (priv->broken_disableport) {
4157 orinoco_reset(dev); 4163 orinoco_reset(&priv->reset_work);
4158 return 0; 4164 return 0;
4159 } 4165 }
4160 4166
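Because orinoco_reset() no longer takes the net_device, a forced synchronous reset has to pass the embedded work item, and the handler reaches the net_device through the priv->ndev back-pointer visible in the hunks above. A hypothetical demo_* sketch of that shape:

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct demo_wifi {
	struct net_device *ndev;	/* back-pointer, set when the device is allocated */
	struct work_struct reset_work;
};

static void demo_reset(struct work_struct *work)
{
	struct demo_wifi *priv = container_of(work, struct demo_wifi, reset_work);
	struct net_device *dev = priv->ndev;

	printk(KERN_DEBUG "%s: resetting\n", dev->name);
	/* ... reinitialise the hardware ... */
}

/* A forced, synchronous reset now calls the handler with its own work item: */
static void demo_force_reset(struct demo_wifi *priv)
{
	demo_reset(&priv->reset_work);	/* old code passed the net_device */
}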
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index bc14689cbf24..d08ae8d2726c 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -178,21 +178,6 @@ orinoco_cs_config(struct pcmcia_device *link)
178 cisparse_t parse; 178 cisparse_t parse;
179 void __iomem *mem; 179 void __iomem *mem;
180 180
181 /*
182 * This reads the card's CONFIG tuple to find its
183 * configuration registers.
184 */
185 tuple.DesiredTuple = CISTPL_CONFIG;
186 tuple.Attributes = 0;
187 tuple.TupleData = buf;
188 tuple.TupleDataMax = sizeof(buf);
189 tuple.TupleOffset = 0;
190 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
191 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
192 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
193 link->conf.ConfigBase = parse.config.base;
194 link->conf.Present = parse.config.rmask[0];
195
196 /* Look up the current Vcc */ 181 /* Look up the current Vcc */
197 CS_CHECK(GetConfigurationInfo, 182 CS_CHECK(GetConfigurationInfo,
198 pcmcia_get_configuration_info(link, &conf)); 183 pcmcia_get_configuration_info(link, &conf));
@@ -211,6 +196,10 @@ orinoco_cs_config(struct pcmcia_device *link)
211 * and most client drivers will only use the CIS to fill in 196 * and most client drivers will only use the CIS to fill in
212 * implementation-defined details. 197 * implementation-defined details.
213 */ 198 */
199 tuple.Attributes = 0;
200 tuple.TupleData = buf;
201 tuple.TupleDataMax = sizeof(buf);
202 tuple.TupleOffset = 0;
214 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 203 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
215 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 204 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
216 while (1) { 205 while (1) {
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 4a20e45de3ca..a87eb51886c8 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -157,8 +157,9 @@ prism54_mib_init(islpci_private *priv)
157 * schedule_work(), thus we can as well use sleeping semaphore 157 * schedule_work(), thus we can as well use sleeping semaphore
158 * locking */ 158 * locking */
159void 159void
160prism54_update_stats(islpci_private *priv) 160prism54_update_stats(struct work_struct *work)
161{ 161{
162 islpci_private *priv = container_of(work, islpci_private, stats_work);
162 char *data; 163 char *data;
163 int j; 164 int j;
164 struct obj_bss bss, *bss2; 165 struct obj_bss bss, *bss2;
@@ -2493,9 +2494,10 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
2493 * interrupt context, no locks held. 2494 * interrupt context, no locks held.
2494 */ 2495 */
2495void 2496void
2496prism54_process_trap(void *data) 2497prism54_process_trap(struct work_struct *work)
2497{ 2498{
2498 struct islpci_mgmtframe *frame = data; 2499 struct islpci_mgmtframe *frame =
2500 container_of(work, struct islpci_mgmtframe, ws);
2499 struct net_device *ndev = frame->ndev; 2501 struct net_device *ndev = frame->ndev;
2500 enum oid_num_t n = mgt_oidtonum(frame->header->oid); 2502 enum oid_num_t n = mgt_oidtonum(frame->header->oid);
2501 2503
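prism54 embeds the work item in each management frame rather than in the driver private data, so the handler's container_of() recovers the frame itself, and INIT_WORK() runs per frame in the receive path. A generic sketch of that per-message style (the demo_* names and the kfree() ownership are assumptions for the sketch, not taken from prism54):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_frame {
	struct work_struct ws;
	int oid;
};

static void demo_process_trap(struct work_struct *work)
{
	/* container_of() yields the frame, not the driver private data */
	struct demo_frame *frame = container_of(work, struct demo_frame, ws);

	/* ... handle frame->oid in process context ... */
	kfree(frame);			/* ownership model assumed for this sketch */
}

/* Receive path, possibly in interrupt context: */
static void demo_queue_trap(int oid)
{
	struct demo_frame *frame = kmalloc(sizeof(*frame), GFP_ATOMIC);

	if (!frame)
		return;
	frame->oid = oid;
	INIT_WORK(&frame->ws, demo_process_trap);
	schedule_work(&frame->ws);
}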
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index e8183d30c52e..bcfbfb9281d2 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -31,12 +31,12 @@
31void prism54_mib_init(islpci_private *); 31void prism54_mib_init(islpci_private *);
32 32
33struct iw_statistics *prism54_get_wireless_stats(struct net_device *); 33struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
34void prism54_update_stats(islpci_private *); 34void prism54_update_stats(struct work_struct *);
35 35
36void prism54_acl_init(struct islpci_acl *); 36void prism54_acl_init(struct islpci_acl *);
37void prism54_acl_clean(struct islpci_acl *); 37void prism54_acl_clean(struct islpci_acl *);
38 38
39void prism54_process_trap(void *); 39void prism54_process_trap(struct work_struct *);
40 40
41void prism54_wpa_bss_ie_init(islpci_private *priv); 41void prism54_wpa_bss_ie_init(islpci_private *priv);
42void prism54_wpa_bss_ie_clean(islpci_private *priv); 42void prism54_wpa_bss_ie_clean(islpci_private *priv);
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 1e0603ca436c..f057fd9fcd79 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -860,11 +860,10 @@ islpci_setup(struct pci_dev *pdev)
860 priv->state_off = 1; 860 priv->state_off = 1;
861 861
862 /* initialize workqueue's */ 862 /* initialize workqueue's */
863 INIT_WORK(&priv->stats_work, 863 INIT_WORK(&priv->stats_work, prism54_update_stats);
864 (void (*)(void *)) prism54_update_stats, priv);
865 priv->stats_timestamp = 0; 864 priv->stats_timestamp = 0;
866 865
867 INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv); 866 INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
868 priv->reset_task_pending = 0; 867 priv->reset_task_pending = 0;
869 868
870 /* allocate various memory areas */ 869 /* allocate various memory areas */
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 676d83813dc8..b1122912ee2d 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -480,9 +480,9 @@ islpci_eth_receive(islpci_private *priv)
480} 480}
481 481
482void 482void
483islpci_do_reset_and_wake(void *data) 483islpci_do_reset_and_wake(struct work_struct *work)
484{ 484{
485 islpci_private *priv = data; 485 islpci_private *priv = container_of(work, islpci_private, reset_task);
486 486
487 islpci_reset(priv, 1); 487 islpci_reset(priv, 1);
488 priv->reset_task_pending = 0; 488 priv->reset_task_pending = 0;
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 26789454067c..5bf820defbd0 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -67,6 +67,6 @@ void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *);
67int islpci_eth_transmit(struct sk_buff *, struct net_device *); 67int islpci_eth_transmit(struct sk_buff *, struct net_device *);
68int islpci_eth_receive(islpci_private *); 68int islpci_eth_receive(islpci_private *);
69void islpci_eth_tx_timeout(struct net_device *); 69void islpci_eth_tx_timeout(struct net_device *);
70void islpci_do_reset_and_wake(void *data); 70void islpci_do_reset_and_wake(struct work_struct *);
71 71
72#endif /* _ISL_GEN_H */ 72#endif /* _ISL_GEN_H */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 036a875054c9..2246f7930b4e 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -386,7 +386,7 @@ islpci_mgt_receive(struct net_device *ndev)
386 386
387 /* Create work to handle trap out of interrupt 387 /* Create work to handle trap out of interrupt
388 * context. */ 388 * context. */
389 INIT_WORK(&frame->ws, prism54_process_trap, frame); 389 INIT_WORK(&frame->ws, prism54_process_trap);
390 schedule_work(&frame->ws); 390 schedule_work(&frame->ws);
391 391
392 } else { 392 } else {
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 7fbfc9e41d07..88e10c9bc4ac 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -408,11 +408,8 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
408#define MAX_TUPLE_SIZE 128 408#define MAX_TUPLE_SIZE 128
409static int ray_config(struct pcmcia_device *link) 409static int ray_config(struct pcmcia_device *link)
410{ 410{
411 tuple_t tuple;
412 cisparse_t parse;
413 int last_fn = 0, last_ret = 0; 411 int last_fn = 0, last_ret = 0;
414 int i; 412 int i;
415 u_char buf[MAX_TUPLE_SIZE];
416 win_req_t req; 413 win_req_t req;
417 memreq_t mem; 414 memreq_t mem;
418 struct net_device *dev = (struct net_device *)link->priv; 415 struct net_device *dev = (struct net_device *)link->priv;
@@ -420,29 +417,12 @@ static int ray_config(struct pcmcia_device *link)
420 417
421 DEBUG(1, "ray_config(0x%p)\n", link); 418 DEBUG(1, "ray_config(0x%p)\n", link);
422 419
423 /* This reads the card's CONFIG tuple to find its configuration regs */
424 tuple.DesiredTuple = CISTPL_CONFIG;
425 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
426 tuple.TupleData = buf;
427 tuple.TupleDataMax = MAX_TUPLE_SIZE;
428 tuple.TupleOffset = 0;
429 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
430 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
431 link->conf.ConfigBase = parse.config.base;
432 link->conf.Present = parse.config.rmask[0];
433
434 /* Determine card type and firmware version */ 420 /* Determine card type and firmware version */
435 buf[0] = buf[MAX_TUPLE_SIZE - 1] = 0; 421 printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n",
436 tuple.DesiredTuple = CISTPL_VERS_1; 422 link->prod_id[0] ? link->prod_id[0] : " ",
437 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 423 link->prod_id[1] ? link->prod_id[1] : " ",
438 tuple.TupleData = buf; 424 link->prod_id[2] ? link->prod_id[2] : " ",
439 tuple.TupleDataMax = MAX_TUPLE_SIZE; 425 link->prod_id[3] ? link->prod_id[3] : " ");
440 tuple.TupleOffset = 2;
441 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
442
443 for (i=0; i<tuple.TupleDataLen - 4; i++)
444 if (buf[i] == 0) buf[i] = ' ';
445 printk(KERN_INFO "ray_cs Detected: %s\n",buf);
446 426
447 /* Now allocate an interrupt line. Note that this does not 427 /* Now allocate an interrupt line. Note that this does not
448 actually assign a handler to the interrupt. 428 actually assign a handler to the interrupt.
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index bcc7038130f6..cf2d1486b01d 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -647,21 +647,6 @@ spectrum_cs_config(struct pcmcia_device *link)
647 cisparse_t parse; 647 cisparse_t parse;
648 void __iomem *mem; 648 void __iomem *mem;
649 649
650 /*
651 * This reads the card's CONFIG tuple to find its
652 * configuration registers.
653 */
654 tuple.DesiredTuple = CISTPL_CONFIG;
655 tuple.Attributes = 0;
656 tuple.TupleData = buf;
657 tuple.TupleDataMax = sizeof(buf);
658 tuple.TupleOffset = 0;
659 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
660 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
661 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
662 link->conf.ConfigBase = parse.config.base;
663 link->conf.Present = parse.config.rmask[0];
664
665 /* Look up the current Vcc */ 650 /* Look up the current Vcc */
666 CS_CHECK(GetConfigurationInfo, 651 CS_CHECK(GetConfigurationInfo,
667 pcmcia_get_configuration_info(link, &conf)); 652 pcmcia_get_configuration_info(link, &conf));
@@ -681,6 +666,10 @@ spectrum_cs_config(struct pcmcia_device *link)
681 * implementation-defined details. 666 * implementation-defined details.
682 */ 667 */
683 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 668 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
669 tuple.Attributes = 0;
670 tuple.TupleData = buf;
671 tuple.TupleDataMax = sizeof(buf);
672 tuple.TupleOffset = 0;
684 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 673 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
685 while (1) { 674 while (1) {
686 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 675 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index aafb301041b1..233d906c08f0 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -3939,11 +3939,8 @@ wv_hw_reset(struct net_device * dev)
3939static inline int 3939static inline int
3940wv_pcmcia_config(struct pcmcia_device * link) 3940wv_pcmcia_config(struct pcmcia_device * link)
3941{ 3941{
3942 tuple_t tuple;
3943 cisparse_t parse;
3944 struct net_device * dev = (struct net_device *) link->priv; 3942 struct net_device * dev = (struct net_device *) link->priv;
3945 int i; 3943 int i;
3946 u_char buf[64];
3947 win_req_t req; 3944 win_req_t req;
3948 memreq_t mem; 3945 memreq_t mem;
3949 net_local * lp = netdev_priv(dev); 3946 net_local * lp = netdev_priv(dev);
@@ -3953,36 +3950,6 @@ wv_pcmcia_config(struct pcmcia_device * link)
3953 printk(KERN_DEBUG "->wv_pcmcia_config(0x%p)\n", link); 3950 printk(KERN_DEBUG "->wv_pcmcia_config(0x%p)\n", link);
3954#endif 3951#endif
3955 3952
3956 /*
3957 * This reads the card's CONFIG tuple to find its configuration
3958 * registers.
3959 */
3960 do
3961 {
3962 tuple.Attributes = 0;
3963 tuple.DesiredTuple = CISTPL_CONFIG;
3964 i = pcmcia_get_first_tuple(link, &tuple);
3965 if(i != CS_SUCCESS)
3966 break;
3967 tuple.TupleData = (cisdata_t *)buf;
3968 tuple.TupleDataMax = 64;
3969 tuple.TupleOffset = 0;
3970 i = pcmcia_get_tuple_data(link, &tuple);
3971 if(i != CS_SUCCESS)
3972 break;
3973 i = pcmcia_parse_tuple(link, &tuple, &parse);
3974 if(i != CS_SUCCESS)
3975 break;
3976 link->conf.ConfigBase = parse.config.base;
3977 link->conf.Present = parse.config.rmask[0];
3978 }
3979 while(0);
3980 if(i != CS_SUCCESS)
3981 {
3982 cs_error(link, ParseTuple, i);
3983 return FALSE;
3984 }
3985
3986 do 3953 do
3987 { 3954 {
3988 i = pcmcia_request_io(link, &link->io); 3955 i = pcmcia_request_io(link, &link->io);
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 5b98a7876982..583e0d655a98 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1966,25 +1966,10 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
1966 */ 1966 */
1967static int wl3501_config(struct pcmcia_device *link) 1967static int wl3501_config(struct pcmcia_device *link)
1968{ 1968{
1969 tuple_t tuple;
1970 cisparse_t parse;
1971 struct net_device *dev = link->priv; 1969 struct net_device *dev = link->priv;
1972 int i = 0, j, last_fn, last_ret; 1970 int i = 0, j, last_fn, last_ret;
1973 unsigned char bf[64];
1974 struct wl3501_card *this; 1971 struct wl3501_card *this;
1975 1972
1976 /* This reads the card's CONFIG tuple to find its config registers. */
1977 tuple.Attributes = 0;
1978 tuple.DesiredTuple = CISTPL_CONFIG;
1979 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
1980 tuple.TupleData = bf;
1981 tuple.TupleDataMax = sizeof(bf);
1982 tuple.TupleOffset = 0;
1983 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
1984 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
1985 link->conf.ConfigBase = parse.config.base;
1986 link->conf.Present = parse.config.rmask[0];
1987
1988 /* Try allocating IO ports. This tries a few fixed addresses. If you 1973 /* Try allocating IO ports. This tries a few fixed addresses. If you
1989 * want, you can also read the card's config table to pick addresses -- 1974 * want, you can also read the card's config table to pick addresses --
1990 * see the serial driver for an example. */ 1975 * see the serial driver for an example. */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 2696f95b9278..f1573a9c2336 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -32,8 +32,8 @@
32 32
33static void ieee_init(struct ieee80211_device *ieee); 33static void ieee_init(struct ieee80211_device *ieee);
34static void softmac_init(struct ieee80211softmac_device *sm); 34static void softmac_init(struct ieee80211softmac_device *sm);
35static void set_rts_cts_work(void *d); 35static void set_rts_cts_work(struct work_struct *work);
36static void set_basic_rates_work(void *d); 36static void set_basic_rates_work(struct work_struct *work);
37 37
38static void housekeeping_init(struct zd_mac *mac); 38static void housekeeping_init(struct zd_mac *mac);
39static void housekeeping_enable(struct zd_mac *mac); 39static void housekeeping_enable(struct zd_mac *mac);
@@ -48,8 +48,8 @@ int zd_mac_init(struct zd_mac *mac,
48 memset(mac, 0, sizeof(*mac)); 48 memset(mac, 0, sizeof(*mac));
49 spin_lock_init(&mac->lock); 49 spin_lock_init(&mac->lock);
50 mac->netdev = netdev; 50 mac->netdev = netdev;
51 INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac); 51 INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
52 INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac); 52 INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);
53 53
54 ieee_init(ieee); 54 ieee_init(ieee);
55 softmac_init(ieee80211_priv(netdev)); 55 softmac_init(ieee80211_priv(netdev));
@@ -366,9 +366,10 @@ static void try_enable_tx(struct zd_mac *mac)
366 spin_unlock_irqrestore(&mac->lock, flags); 366 spin_unlock_irqrestore(&mac->lock, flags);
367} 367}
368 368
369static void set_rts_cts_work(void *d) 369static void set_rts_cts_work(struct work_struct *work)
370{ 370{
371 struct zd_mac *mac = d; 371 struct zd_mac *mac =
372 container_of(work, struct zd_mac, set_rts_cts_work.work);
372 unsigned long flags; 373 unsigned long flags;
373 u8 rts_rate; 374 u8 rts_rate;
374 unsigned int short_preamble; 375 unsigned int short_preamble;
@@ -387,9 +388,10 @@ static void set_rts_cts_work(void *d)
387 try_enable_tx(mac); 388 try_enable_tx(mac);
388} 389}
389 390
390static void set_basic_rates_work(void *d) 391static void set_basic_rates_work(struct work_struct *work)
391{ 392{
392 struct zd_mac *mac = d; 393 struct zd_mac *mac =
394 container_of(work, struct zd_mac, set_basic_rates_work.work);
393 unsigned long flags; 395 unsigned long flags;
394 u16 basic_rates; 396 u16 basic_rates;
395 397
@@ -467,12 +469,13 @@ static void bssinfo_change(struct net_device *netdev, u32 changes)
467 if (need_set_rts_cts && !mac->updating_rts_rate) { 469 if (need_set_rts_cts && !mac->updating_rts_rate) {
468 mac->updating_rts_rate = 1; 470 mac->updating_rts_rate = 1;
469 netif_stop_queue(mac->netdev); 471 netif_stop_queue(mac->netdev);
470 queue_work(zd_workqueue, &mac->set_rts_cts_work); 472 queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0);
471 } 473 }
472 if (need_set_rates && !mac->updating_basic_rates) { 474 if (need_set_rates && !mac->updating_basic_rates) {
473 mac->updating_basic_rates = 1; 475 mac->updating_basic_rates = 1;
474 netif_stop_queue(mac->netdev); 476 netif_stop_queue(mac->netdev);
475 queue_work(zd_workqueue, &mac->set_basic_rates_work); 477 queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work,
478 0);
476 } 479 }
477 spin_unlock_irqrestore(&mac->lock, flags); 480 spin_unlock_irqrestore(&mac->lock, flags);
478} 481}
@@ -1182,9 +1185,10 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
1182 1185
1183#define LINK_LED_WORK_DELAY HZ 1186#define LINK_LED_WORK_DELAY HZ
1184 1187
1185static void link_led_handler(void *p) 1188static void link_led_handler(struct work_struct *work)
1186{ 1189{
1187 struct zd_mac *mac = p; 1190 struct zd_mac *mac =
1191 container_of(work, struct zd_mac, housekeeping.link_led_work.work);
1188 struct zd_chip *chip = &mac->chip; 1192 struct zd_chip *chip = &mac->chip;
1189 struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev); 1193 struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
1190 int is_associated; 1194 int is_associated;
@@ -1205,7 +1209,7 @@ static void link_led_handler(void *p)
1205 1209
1206static void housekeeping_init(struct zd_mac *mac) 1210static void housekeeping_init(struct zd_mac *mac)
1207{ 1211{
1208 INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac); 1212 INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
1209} 1213}
1210 1214
1211static void housekeeping_enable(struct zd_mac *mac) 1215static void housekeeping_enable(struct zd_mac *mac)
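zd1211rw shows that the member argument to container_of() can be a path through nested structures: link_led_work lives inside mac->housekeeping, so the handler names housekeeping.link_led_work.work. A hypothetical demo_* sketch:

#include <linux/workqueue.h>

struct demo_housekeeping {
	struct delayed_work led_work;
};

struct demo_mac {
	struct demo_housekeeping housekeeping;
};

static void demo_led_handler(struct work_struct *work)
{
	/* the member path walks through the nested struct down to the
	 * embedded work_struct */
	struct demo_mac *mac =
		container_of(work, struct demo_mac, housekeeping.led_work.work);

	(void)mac;			/* ... update the link LED ... */
}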
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 5dcfb251f02e..d4e8b870409d 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -119,7 +119,7 @@ struct rx_status {
119#define ZD_RX_ERROR 0x80 119#define ZD_RX_ERROR 0x80
120 120
121struct housekeeping { 121struct housekeeping {
122 struct work_struct link_led_work; 122 struct delayed_work link_led_work;
123}; 123};
124 124
125#define ZD_MAC_STATS_BUFFER_SIZE 16 125#define ZD_MAC_STATS_BUFFER_SIZE 16
@@ -133,8 +133,8 @@ struct zd_mac {
133 struct iw_statistics iw_stats; 133 struct iw_statistics iw_stats;
134 134
135 struct housekeeping housekeeping; 135 struct housekeeping housekeeping;
136 struct work_struct set_rts_cts_work; 136 struct delayed_work set_rts_cts_work;
137 struct work_struct set_basic_rates_work; 137 struct delayed_work set_basic_rates_work;
138 138
139 unsigned int stats_count; 139 unsigned int stats_count;
140 u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; 140 u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index fc4bc9b94c74..a83c3db7d18f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -29,7 +29,7 @@
29 29
30struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned; 30struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
31 31
32static void wq_sync_buffer(void *); 32static void wq_sync_buffer(struct work_struct *work);
33 33
34#define DEFAULT_TIMER_EXPIRE (HZ / 10) 34#define DEFAULT_TIMER_EXPIRE (HZ / 10)
35static int work_enabled; 35static int work_enabled;
@@ -65,7 +65,7 @@ int alloc_cpu_buffers(void)
65 b->sample_received = 0; 65 b->sample_received = 0;
66 b->sample_lost_overflow = 0; 66 b->sample_lost_overflow = 0;
67 b->cpu = i; 67 b->cpu = i;
68 INIT_WORK(&b->work, wq_sync_buffer, b); 68 INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
69 } 69 }
70 return 0; 70 return 0;
71 71
@@ -282,9 +282,10 @@ void oprofile_add_trace(unsigned long pc)
282 * By using schedule_delayed_work_on and then schedule_delayed_work 282 * By using schedule_delayed_work_on and then schedule_delayed_work
283 * we guarantee this will stay on the correct cpu 283 * we guarantee this will stay on the correct cpu
284 */ 284 */
285static void wq_sync_buffer(void * data) 285static void wq_sync_buffer(struct work_struct *work)
286{ 286{
287 struct oprofile_cpu_buffer * b = data; 287 struct oprofile_cpu_buffer * b =
288 container_of(work, struct oprofile_cpu_buffer, work.work);
288 if (b->cpu != smp_processor_id()) { 289 if (b->cpu != smp_processor_id()) {
289 printk("WQ on CPU%d, prefer CPU%d\n", 290 printk("WQ on CPU%d, prefer CPU%d\n",
290 smp_processor_id(), b->cpu); 291 smp_processor_id(), b->cpu);
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 09abb80e0570..49900d9e3235 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -43,7 +43,7 @@ struct oprofile_cpu_buffer {
43 unsigned long sample_lost_overflow; 43 unsigned long sample_lost_overflow;
44 unsigned long backtrace_aborted; 44 unsigned long backtrace_aborted;
45 int cpu; 45 int cpu;
46 struct work_struct work; 46 struct delayed_work work;
47} ____cacheline_aligned; 47} ____cacheline_aligned;
48 48
49extern struct oprofile_cpu_buffer cpu_buffer[]; 49extern struct oprofile_cpu_buffer cpu_buffer[];
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index b953d5907c05..e60b4bf6bae8 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -166,14 +166,6 @@ static int parport_config(struct pcmcia_device *link)
166 166
167 tuple.TupleData = (cisdata_t *)buf; 167 tuple.TupleData = (cisdata_t *)buf;
168 tuple.TupleOffset = 0; tuple.TupleDataMax = 255; 168 tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
169 tuple.Attributes = 0;
170 tuple.DesiredTuple = CISTPL_CONFIG;
171 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
172 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
173 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
174 link->conf.ConfigBase = parse.config.base;
175 link->conf.Present = parse.config.rmask[0];
176
177 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 169 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
178 tuple.Attributes = 0; 170 tuple.Attributes = 0;
179 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 171 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
@@ -263,6 +255,7 @@ void parport_cs_release(struct pcmcia_device *link)
263 255
264static struct pcmcia_device_id parport_ids[] = { 256static struct pcmcia_device_id parport_ids[] = {
265 PCMCIA_DEVICE_FUNC_ID(3), 257 PCMCIA_DEVICE_FUNC_ID(3),
258 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
266 PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003), 259 PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
267 PCMCIA_DEVICE_NULL 260 PCMCIA_DEVICE_NULL
268}; 261};
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index ea2087c34149..50757695844f 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -70,7 +70,7 @@ struct slot {
70 struct hotplug_slot *hotplug_slot; 70 struct hotplug_slot *hotplug_slot;
71 struct list_head slot_list; 71 struct list_head slot_list;
72 char name[SLOT_NAME_SIZE]; 72 char name[SLOT_NAME_SIZE];
73 struct work_struct work; /* work for button event */ 73 struct delayed_work work; /* work for button event */
74 struct mutex lock; 74 struct mutex lock;
75}; 75};
76 76
@@ -187,7 +187,7 @@ extern int shpchp_configure_device(struct slot *p_slot);
187extern int shpchp_unconfigure_device(struct slot *p_slot); 187extern int shpchp_unconfigure_device(struct slot *p_slot);
188extern void shpchp_remove_ctrl_files(struct controller *ctrl); 188extern void shpchp_remove_ctrl_files(struct controller *ctrl);
189extern void cleanup_slots(struct controller *ctrl); 189extern void cleanup_slots(struct controller *ctrl);
190extern void queue_pushbutton_work(void *data); 190extern void queue_pushbutton_work(struct work_struct *work);
191 191
192 192
193#ifdef CONFIG_ACPI 193#ifdef CONFIG_ACPI
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 235c18a22393..4eac85b3d90e 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -159,7 +159,7 @@ static int init_slots(struct controller *ctrl)
159 goto error_info; 159 goto error_info;
160 160
161 slot->number = sun; 161 slot->number = sun;
162 INIT_WORK(&slot->work, queue_pushbutton_work, slot); 162 INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work);
163 163
164 /* register this slot with the hotplug pci core */ 164 /* register this slot with the hotplug pci core */
165 hotplug_slot->private = slot; 165 hotplug_slot->private = slot;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index c39901dbff20..158ac7836096 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -36,7 +36,7 @@
36#include "../pci.h" 36#include "../pci.h"
37#include "shpchp.h" 37#include "shpchp.h"
38 38
39static void interrupt_event_handler(void *data); 39static void interrupt_event_handler(struct work_struct *work);
40static int shpchp_enable_slot(struct slot *p_slot); 40static int shpchp_enable_slot(struct slot *p_slot);
41static int shpchp_disable_slot(struct slot *p_slot); 41static int shpchp_disable_slot(struct slot *p_slot);
42 42
@@ -50,7 +50,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
50 50
51 info->event_type = event_type; 51 info->event_type = event_type;
52 info->p_slot = p_slot; 52 info->p_slot = p_slot;
53 INIT_WORK(&info->work, interrupt_event_handler, info); 53 INIT_WORK(&info->work, interrupt_event_handler);
54 54
55 schedule_work(&info->work); 55 schedule_work(&info->work);
56 56
@@ -408,9 +408,10 @@ struct pushbutton_work_info {
408 * Handles all pending events and exits. 408 * Handles all pending events and exits.
409 * 409 *
410 */ 410 */
411static void shpchp_pushbutton_thread(void *data) 411static void shpchp_pushbutton_thread(struct work_struct *work)
412{ 412{
413 struct pushbutton_work_info *info = data; 413 struct pushbutton_work_info *info =
414 container_of(work, struct pushbutton_work_info, work);
414 struct slot *p_slot = info->p_slot; 415 struct slot *p_slot = info->p_slot;
415 416
416 mutex_lock(&p_slot->lock); 417 mutex_lock(&p_slot->lock);
@@ -436,9 +437,9 @@ static void shpchp_pushbutton_thread(void *data)
436 kfree(info); 437 kfree(info);
437} 438}
438 439
439void queue_pushbutton_work(void *data) 440void queue_pushbutton_work(struct work_struct *work)
440{ 441{
441 struct slot *p_slot = data; 442 struct slot *p_slot = container_of(work, struct slot, work.work);
442 struct pushbutton_work_info *info; 443 struct pushbutton_work_info *info;
443 444
444 info = kmalloc(sizeof(*info), GFP_KERNEL); 445 info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -447,7 +448,7 @@ void queue_pushbutton_work(void *data)
447 return; 448 return;
448 } 449 }
449 info->p_slot = p_slot; 450 info->p_slot = p_slot;
450 INIT_WORK(&info->work, shpchp_pushbutton_thread, info); 451 INIT_WORK(&info->work, shpchp_pushbutton_thread);
451 452
452 mutex_lock(&p_slot->lock); 453 mutex_lock(&p_slot->lock);
453 switch (p_slot->state) { 454 switch (p_slot->state) {
@@ -541,9 +542,9 @@ static void handle_button_press_event(struct slot *p_slot)
541 } 542 }
542} 543}
543 544
544static void interrupt_event_handler(void *data) 545static void interrupt_event_handler(struct work_struct *work)
545{ 546{
546 struct event_info *info = data; 547 struct event_info *info = container_of(work, struct event_info, work);
547 struct slot *p_slot = info->p_slot; 548 struct slot *p_slot = info->p_slot;
548 549
549 mutex_lock(&p_slot->lock); 550 mutex_lock(&p_slot->lock);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 04c43ef529ac..55866b6b26fa 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -160,7 +160,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
160 rpc->e_lock = SPIN_LOCK_UNLOCKED; 160 rpc->e_lock = SPIN_LOCK_UNLOCKED;
161 161
162 rpc->rpd = dev; 162 rpc->rpd = dev;
163 INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev); 163 INIT_WORK(&rpc->dpc_handler, aer_isr);
164 rpc->prod_idx = rpc->cons_idx = 0; 164 rpc->prod_idx = rpc->cons_idx = 0;
165 mutex_init(&rpc->rpc_mutex); 165 mutex_init(&rpc->rpc_mutex);
166 init_waitqueue_head(&rpc->wait_release); 166 init_waitqueue_head(&rpc->wait_release);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index daf0cad88fc8..3c0a58f64dd8 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -118,7 +118,7 @@ extern struct bus_type pcie_port_bus_type;
118extern void aer_enable_rootport(struct aer_rpc *rpc); 118extern void aer_enable_rootport(struct aer_rpc *rpc);
119extern void aer_delete_rootport(struct aer_rpc *rpc); 119extern void aer_delete_rootport(struct aer_rpc *rpc);
120extern int aer_init(struct pcie_device *dev); 120extern int aer_init(struct pcie_device *dev);
121extern void aer_isr(void *context); 121extern void aer_isr(struct work_struct *work);
122extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 122extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
123extern int aer_osc_setup(struct pci_dev *dev); 123extern int aer_osc_setup(struct pci_dev *dev);
124 124
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 1c7e660d6535..08e13033ced8 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -690,14 +690,14 @@ static void aer_isr_one_error(struct pcie_device *p_device,
690 690
691/** 691/**
692 * aer_isr - consume errors detected by root port 692 * aer_isr - consume errors detected by root port
693 * @context: pointer to a private data of pcie device 693 * @work: definition of this work item
694 * 694 *
695 * Invoked, as DPC, when root port records new detected error 695 * Invoked, as DPC, when root port records new detected error
696 **/ 696 **/
697void aer_isr(void *context) 697void aer_isr(struct work_struct *work)
698{ 698{
699 struct pcie_device *p_device = (struct pcie_device *) context; 699 struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
700 struct aer_rpc *rpc = get_service_data(p_device); 700 struct pcie_device *p_device = rpc->rpd;
701 struct aer_err_source *e_src; 701 struct aer_err_source *e_src;
702 702
703 mutex_lock(&rpc->rpc_mutex); 703 mutex_lock(&rpc->rpc_mutex);
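With no context argument left, anything a handler needs has to live in the structure its work item is embedded in; aer_alloc_rpc() already stores the pcie_device in rpc->rpd, so aer_isr() can recover both through container_of(). A hypothetical demo_* sketch:

#include <linux/workqueue.h>

struct demo_port;			/* stands in for struct pcie_device */

struct demo_rpc {
	struct demo_port *rpd;		/* saved at init time; replaces the old
					 * "context" argument to INIT_WORK() */
	struct work_struct dpc_handler;
};

static void demo_isr(struct work_struct *work)
{
	struct demo_rpc *rpc = container_of(work, struct demo_rpc, dpc_handler);
	struct demo_port *port = rpc->rpd;

	(void)port;			/* ... drain queued errors for this root port ... */
}

static void demo_alloc_rpc(struct demo_rpc *rpc, struct demo_port *port)
{
	rpc->rpd = port;		/* everything the handler needs lives in the struct */
	INIT_WORK(&rpc->dpc_handler, demo_isr);
}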
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 3bcb7dc32995..b6746301d9a9 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -32,10 +32,11 @@
32 * A0..A10 work in each range; A23 indicates I/O space; A25 is CFRNW; 32 * A0..A10 work in each range; A23 indicates I/O space; A25 is CFRNW;
33 * some other bit in {A24,A22..A11} is nREG to flag memory access 33 * some other bit in {A24,A22..A11} is nREG to flag memory access
34 * (vs attributes). So more than 2KB/region would just be waste. 34 * (vs attributes). So more than 2KB/region would just be waste.
35 * Note: These are offsets from the physical base address.
35 */ 36 */
36#define CF_ATTR_PHYS (AT91_CF_BASE) 37#define CF_ATTR_PHYS (0)
37#define CF_IO_PHYS (AT91_CF_BASE + (1 << 23)) 38#define CF_IO_PHYS (1 << 23)
38#define CF_MEM_PHYS (AT91_CF_BASE + 0x017ff800) 39#define CF_MEM_PHYS (0x017ff800)
39 40
40/*--------------------------------------------------------------------------*/ 41/*--------------------------------------------------------------------------*/
41 42
@@ -48,6 +49,8 @@ struct at91_cf_socket {
48 49
49 struct platform_device *pdev; 50 struct platform_device *pdev;
50 struct at91_cf_data *board; 51 struct at91_cf_data *board;
52
53 unsigned long phys_baseaddr;
51}; 54};
52 55
53#define SZ_2K (2 * SZ_1K) 56#define SZ_2K (2 * SZ_1K)
@@ -154,9 +157,8 @@ static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
154 157
155 /* 158 /*
156 * Use 16 bit accesses unless/until we need 8-bit i/o space. 159 * Use 16 bit accesses unless/until we need 8-bit i/o space.
157 * Always set CSR4 ... PCMCIA won't always unmap things.
158 */ 160 */
159 csr = at91_sys_read(AT91_SMC_CSR(4)) & ~AT91_SMC_DBW; 161 csr = at91_sys_read(AT91_SMC_CSR(cf->board->chipselect)) & ~AT91_SMC_DBW;
160 162
161 /* 163 /*
162 * NOTE: this CF controller ignores IOIS16, so we can't really do 164 * NOTE: this CF controller ignores IOIS16, so we can't really do
@@ -168,14 +170,14 @@ static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
168 * some cards only like that way to get at the odd byte, despite 170 * some cards only like that way to get at the odd byte, despite
169 * CF 3.0 spec table 35 also giving the D8-D15 option. 171 * CF 3.0 spec table 35 also giving the D8-D15 option.
170 */ 172 */
171 if (!(io->flags & (MAP_16BIT|MAP_AUTOSZ))) { 173 if (!(io->flags & (MAP_16BIT | MAP_AUTOSZ))) {
172 csr |= AT91_SMC_DBW_8; 174 csr |= AT91_SMC_DBW_8;
173 pr_debug("%s: 8bit i/o bus\n", driver_name); 175 pr_debug("%s: 8bit i/o bus\n", driver_name);
174 } else { 176 } else {
175 csr |= AT91_SMC_DBW_16; 177 csr |= AT91_SMC_DBW_16;
176 pr_debug("%s: 16bit i/o bus\n", driver_name); 178 pr_debug("%s: 16bit i/o bus\n", driver_name);
177 } 179 }
178 at91_sys_write(AT91_SMC_CSR(4), csr); 180 at91_sys_write(AT91_SMC_CSR(cf->board->chipselect), csr);
179 181
180 io->start = cf->socket.io_offset; 182 io->start = cf->socket.io_offset;
181 io->stop = io->start + SZ_2K - 1; 183 io->stop = io->start + SZ_2K - 1;
@@ -194,11 +196,11 @@ at91_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
194 196
195 cf = container_of(s, struct at91_cf_socket, socket); 197 cf = container_of(s, struct at91_cf_socket, socket);
196 198
197 map->flags &= MAP_ACTIVE|MAP_ATTRIB|MAP_16BIT; 199 map->flags &= (MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT);
198 if (map->flags & MAP_ATTRIB) 200 if (map->flags & MAP_ATTRIB)
199 map->static_start = CF_ATTR_PHYS; 201 map->static_start = cf->phys_baseaddr + CF_ATTR_PHYS;
200 else 202 else
201 map->static_start = CF_MEM_PHYS; 203 map->static_start = cf->phys_baseaddr + CF_MEM_PHYS;
202 204
203 return 0; 205 return 0;
204} 206}
@@ -219,7 +221,6 @@ static int __init at91_cf_probe(struct platform_device *pdev)
219 struct at91_cf_socket *cf; 221 struct at91_cf_socket *cf;
220 struct at91_cf_data *board = pdev->dev.platform_data; 222 struct at91_cf_data *board = pdev->dev.platform_data;
221 struct resource *io; 223 struct resource *io;
222 unsigned int csa;
223 int status; 224 int status;
224 225
225 if (!board || !board->det_pin || !board->rst_pin) 226 if (!board || !board->det_pin || !board->rst_pin)
@@ -235,33 +236,11 @@ static int __init at91_cf_probe(struct platform_device *pdev)
235 236
236 cf->board = board; 237 cf->board = board;
237 cf->pdev = pdev; 238 cf->pdev = pdev;
239 cf->phys_baseaddr = io->start;
238 platform_set_drvdata(pdev, cf); 240 platform_set_drvdata(pdev, cf);
239 241
240 /* CF takes over CS4, CS5, CS6 */
241 csa = at91_sys_read(AT91_EBI_CSA);
242 at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS4A_SMC_COMPACTFLASH);
243
244 /* nWAIT is _not_ a default setting */
245 (void) at91_set_A_periph(AT91_PIN_PC6, 1); /* nWAIT */
246
247 /*
248 * Static memory controller timing adjustments.
249 * REVISIT: these timings are in terms of MCK cycles, so
250 * when MCK changes (cpufreq etc) so must these values...
251 */
252 at91_sys_write(AT91_SMC_CSR(4),
253 AT91_SMC_ACSS_STD
254 | AT91_SMC_DBW_16
255 | AT91_SMC_BAT
256 | AT91_SMC_WSEN
257 | AT91_SMC_NWS_(32) /* wait states */
258 | AT91_SMC_RWSETUP_(6) /* setup time */
259 | AT91_SMC_RWHOLD_(4) /* hold time */
260 );
261
262 /* must be a GPIO; ergo must trigger on both edges */ 242 /* must be a GPIO; ergo must trigger on both edges */
263 status = request_irq(board->det_pin, at91_cf_irq, 243 status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf);
264 IRQF_SAMPLE_RANDOM, driver_name, cf);
265 if (status < 0) 244 if (status < 0)
266 goto fail0; 245 goto fail0;
267 device_init_wakeup(&pdev->dev, 1); 246 device_init_wakeup(&pdev->dev, 1);
@@ -282,14 +261,18 @@ static int __init at91_cf_probe(struct platform_device *pdev)
282 cf->socket.pci_irq = NR_IRQS + 1; 261 cf->socket.pci_irq = NR_IRQS + 1;
283 262
284 /* pcmcia layer only remaps "real" memory not iospace */ 263 /* pcmcia layer only remaps "real" memory not iospace */
285 cf->socket.io_offset = (unsigned long) ioremap(CF_IO_PHYS, SZ_2K); 264 cf->socket.io_offset = (unsigned long) ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
286 if (!cf->socket.io_offset) 265 if (!cf->socket.io_offset) {
266 status = -ENXIO;
287 goto fail1; 267 goto fail1;
268 }
288 269
289 /* reserve CS4, CS5, and CS6 regions; but use just CS4 */ 270 /* reserve chip-select regions */
290 if (!request_mem_region(io->start, io->end + 1 - io->start, 271 if (!request_mem_region(io->start, io->end + 1 - io->start,
291 driver_name)) 272 driver_name)) {
273 status = -ENXIO;
292 goto fail1; 274 goto fail1;
275 }
293 276
294 pr_info("%s: irqs det #%d, io #%d\n", driver_name, 277 pr_info("%s: irqs det #%d, io #%d\n", driver_name,
295 board->det_pin, board->irq_pin); 278 board->det_pin, board->irq_pin);
@@ -319,9 +302,7 @@ fail1:
319fail0a: 302fail0a:
320 device_init_wakeup(&pdev->dev, 0); 303 device_init_wakeup(&pdev->dev, 0);
321 free_irq(board->det_pin, cf); 304 free_irq(board->det_pin, cf);
322 device_init_wakeup(&pdev->dev, 0);
323fail0: 305fail0:
324 at91_sys_write(AT91_EBI_CSA, csa);
325 kfree(cf); 306 kfree(cf);
326 return status; 307 return status;
327} 308}
@@ -331,19 +312,15 @@ static int __exit at91_cf_remove(struct platform_device *pdev)
331 struct at91_cf_socket *cf = platform_get_drvdata(pdev); 312 struct at91_cf_socket *cf = platform_get_drvdata(pdev);
332 struct at91_cf_data *board = cf->board; 313 struct at91_cf_data *board = cf->board;
333 struct resource *io = cf->socket.io[0].res; 314 struct resource *io = cf->socket.io[0].res;
334 unsigned int csa;
335 315
336 pcmcia_unregister_socket(&cf->socket); 316 pcmcia_unregister_socket(&cf->socket);
337 if (board->irq_pin) 317 if (board->irq_pin)
338 free_irq(board->irq_pin, cf); 318 free_irq(board->irq_pin, cf);
339 free_irq(board->det_pin, cf);
340 device_init_wakeup(&pdev->dev, 0); 319 device_init_wakeup(&pdev->dev, 0);
320 free_irq(board->det_pin, cf);
341 iounmap((void __iomem *) cf->socket.io_offset); 321 iounmap((void __iomem *) cf->socket.io_offset);
342 release_mem_region(io->start, io->end + 1 - io->start); 322 release_mem_region(io->start, io->end + 1 - io->start);
343 323
344 csa = at91_sys_read(AT91_EBI_CSA);
345 at91_sys_write(AT91_EBI_CSA, csa & ~AT91_EBI_CS4A);
346
347 kfree(cf); 324 kfree(cf);
348 return 0; 325 return 0;
349} 326}
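Separately from the workqueue conversion, at91_cf stops hard-coding AT91_CF_BASE and chip-select 4: the attribute, I/O and memory windows become offsets added to the base address taken from the platform resource, and the SMC register index comes from board->chipselect. A hypothetical demo_* sketch of the addressing part:

#include <linux/ioport.h>

#define DEMO_ATTR_OFF	0		/* attribute space */
#define DEMO_IO_OFF	(1 << 23)	/* I/O space       */
#define DEMO_MEM_OFF	0x017ff800	/* common memory   */

struct demo_cf_socket {
	unsigned long phys_baseaddr;
};

/* The base now comes from the platform resource, not a fixed constant: */
static void demo_cf_probe(struct demo_cf_socket *cf, struct resource *io)
{
	cf->phys_baseaddr = io->start;
}

static unsigned long demo_cf_attr_window(struct demo_cf_socket *cf)
{
	return cf->phys_baseaddr + DEMO_ATTR_OFF;
}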
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index d6164cd583fd..f573ea04db6f 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -135,7 +135,7 @@ int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev, cs_s
135struct pcmcia_callback{ 135struct pcmcia_callback{
136 struct module *owner; 136 struct module *owner;
137 int (*event) (struct pcmcia_socket *s, event_t event, int priority); 137 int (*event) (struct pcmcia_socket *s, event_t event, int priority);
138 void (*requery) (struct pcmcia_socket *s); 138 void (*requery) (struct pcmcia_socket *s, int new_cis);
139 int (*suspend) (struct pcmcia_socket *s); 139 int (*suspend) (struct pcmcia_socket *s);
140 int (*resume) (struct pcmcia_socket *s); 140 int (*resume) (struct pcmcia_socket *s);
141}; 141};
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 21d83a895b21..7355eb455a88 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -231,65 +231,6 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
231} 231}
232 232
233 233
234#ifdef CONFIG_PCMCIA_LOAD_CIS
235
236/**
237 * pcmcia_load_firmware - load CIS from userspace if device-provided is broken
238 * @dev - the pcmcia device which needs a CIS override
239 * @filename - requested filename in /lib/firmware/
240 *
241 * This uses the in-kernel firmware loading mechanism to use a "fake CIS" if
242 * the one provided by the card is broken. The firmware files reside in
243 * /lib/firmware/ in userspace.
244 */
245static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
246{
247 struct pcmcia_socket *s = dev->socket;
248 const struct firmware *fw;
249 char path[20];
250 int ret=-ENOMEM;
251 cisdump_t *cis;
252
253 if (!filename)
254 return -EINVAL;
255
256 ds_dbg(1, "trying to load firmware %s\n", filename);
257
258 if (strlen(filename) > 14)
259 return -EINVAL;
260
261 snprintf(path, 20, "%s", filename);
262
263 if (request_firmware(&fw, path, &dev->dev) == 0) {
264 if (fw->size >= CISTPL_MAX_CIS_SIZE)
265 goto release;
266
267 cis = kzalloc(sizeof(cisdump_t), GFP_KERNEL);
268 if (!cis)
269 goto release;
270
271 cis->Length = fw->size + 1;
272 memcpy(cis->Data, fw->data, fw->size);
273
274 if (!pcmcia_replace_cis(s, cis))
275 ret = 0;
276 }
277 release:
278 release_firmware(fw);
279
280 return (ret);
281}
282
283#else /* !CONFIG_PCMCIA_LOAD_CIS */
284
285static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
286{
287 return -ENODEV;
288}
289
290#endif
291
292
293/*======================================================================*/ 234/*======================================================================*/
294 235
295 236
@@ -309,6 +250,8 @@ int pcmcia_register_driver(struct pcmcia_driver *driver)
309 driver->drv.bus = &pcmcia_bus_type; 250 driver->drv.bus = &pcmcia_bus_type;
310 driver->drv.owner = driver->owner; 251 driver->drv.owner = driver->owner;
311 252
253 ds_dbg(3, "registering driver %s\n", driver->drv.name);
254
312 return driver_register(&driver->drv); 255 return driver_register(&driver->drv);
313} 256}
314EXPORT_SYMBOL(pcmcia_register_driver); 257EXPORT_SYMBOL(pcmcia_register_driver);
@@ -318,6 +261,7 @@ EXPORT_SYMBOL(pcmcia_register_driver);
318 */ 261 */
319void pcmcia_unregister_driver(struct pcmcia_driver *driver) 262void pcmcia_unregister_driver(struct pcmcia_driver *driver)
320{ 263{
264 ds_dbg(3, "unregistering driver %s\n", driver->drv.name);
321 driver_unregister(&driver->drv); 265 driver_unregister(&driver->drv);
322} 266}
323EXPORT_SYMBOL(pcmcia_unregister_driver); 267EXPORT_SYMBOL(pcmcia_unregister_driver);
@@ -343,23 +287,27 @@ void pcmcia_put_dev(struct pcmcia_device *p_dev)
343static void pcmcia_release_function(struct kref *ref) 287static void pcmcia_release_function(struct kref *ref)
344{ 288{
345 struct config_t *c = container_of(ref, struct config_t, ref); 289 struct config_t *c = container_of(ref, struct config_t, ref);
290 ds_dbg(1, "releasing config_t\n");
346 kfree(c); 291 kfree(c);
347} 292}
348 293
349static void pcmcia_release_dev(struct device *dev) 294static void pcmcia_release_dev(struct device *dev)
350{ 295{
351 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 296 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
352 ds_dbg(1, "releasing dev %p\n", p_dev); 297 ds_dbg(1, "releasing device %s\n", p_dev->dev.bus_id);
353 pcmcia_put_socket(p_dev->socket); 298 pcmcia_put_socket(p_dev->socket);
354 kfree(p_dev->devname); 299 kfree(p_dev->devname);
355 kref_put(&p_dev->function_config->ref, pcmcia_release_function); 300 kref_put(&p_dev->function_config->ref, pcmcia_release_function);
356 kfree(p_dev); 301 kfree(p_dev);
357} 302}
358 303
359static void pcmcia_add_pseudo_device(struct pcmcia_socket *s) 304static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc)
360{ 305{
361 if (!s->pcmcia_state.device_add_pending) { 306 if (!s->pcmcia_state.device_add_pending) {
307 ds_dbg(1, "scheduling to add %s secondary"
308 " device to %d\n", mfc ? "mfc" : "pfc", s->sock);
362 s->pcmcia_state.device_add_pending = 1; 309 s->pcmcia_state.device_add_pending = 1;
310 s->pcmcia_state.mfc_pfc = mfc;
363 schedule_work(&s->device_add); 311 schedule_work(&s->device_add);
364 } 312 }
365 return; 313 return;
@@ -371,6 +319,7 @@ static int pcmcia_device_probe(struct device * dev)
371 struct pcmcia_driver *p_drv; 319 struct pcmcia_driver *p_drv;
372 struct pcmcia_device_id *did; 320 struct pcmcia_device_id *did;
373 struct pcmcia_socket *s; 321 struct pcmcia_socket *s;
322 cistpl_config_t cis_config;
374 int ret = 0; 323 int ret = 0;
375 324
376 dev = get_device(dev); 325 dev = get_device(dev);
@@ -381,15 +330,33 @@ static int pcmcia_device_probe(struct device * dev)
381 p_drv = to_pcmcia_drv(dev->driver); 330 p_drv = to_pcmcia_drv(dev->driver);
382 s = p_dev->socket; 331 s = p_dev->socket;
383 332
333 ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id,
334 p_drv->drv.name);
335
384 if ((!p_drv->probe) || (!p_dev->function_config) || 336 if ((!p_drv->probe) || (!p_dev->function_config) ||
385 (!try_module_get(p_drv->owner))) { 337 (!try_module_get(p_drv->owner))) {
386 ret = -EINVAL; 338 ret = -EINVAL;
387 goto put_dev; 339 goto put_dev;
388 } 340 }
389 341
342 /* set up some more device information */
343 ret = pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_CONFIG,
344 &cis_config);
345 if (!ret) {
346 p_dev->conf.ConfigBase = cis_config.base;
347 p_dev->conf.Present = cis_config.rmask[0];
348 } else {
349 printk(KERN_INFO "pcmcia: could not parse base and rmask0 of CIS\n");
350 p_dev->conf.ConfigBase = 0;
351 p_dev->conf.Present = 0;
352 }
353
390 ret = p_drv->probe(p_dev); 354 ret = p_drv->probe(p_dev);
391 if (ret) 355 if (ret) {
356 ds_dbg(1, "binding %s to %s failed with %d\n",
357 p_dev->dev.bus_id, p_drv->drv.name, ret);
392 goto put_module; 358 goto put_module;
359 }
393 360
394 /* handle pseudo multifunction devices: 361 /* handle pseudo multifunction devices:
395 * there are at most two pseudo multifunction devices. 362 * there are at most two pseudo multifunction devices.
@@ -400,7 +367,7 @@ static int pcmcia_device_probe(struct device * dev)
400 did = p_dev->dev.driver_data; 367 did = p_dev->dev.driver_data;
401 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) && 368 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
402 (p_dev->socket->device_count == 1) && (p_dev->device_no == 0)) 369 (p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
403 pcmcia_add_pseudo_device(p_dev->socket); 370 pcmcia_add_device_later(p_dev->socket, 0);
404 371
405 put_module: 372 put_module:
406 if (ret) 373 if (ret)
@@ -421,8 +388,8 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
421 struct pcmcia_device *tmp; 388 struct pcmcia_device *tmp;
422 unsigned long flags; 389 unsigned long flags;
423 390
424 ds_dbg(2, "unbind_request(%d)\n", s->sock); 391 ds_dbg(2, "pcmcia_card_remove(%d) %s\n", s->sock,
425 392 leftover ? leftover->devname : "");
426 393
427 if (!leftover) 394 if (!leftover)
428 s->device_count = 0; 395 s->device_count = 0;
@@ -439,6 +406,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
439 p_dev->_removed=1; 406 p_dev->_removed=1;
440 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 407 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
441 408
409 ds_dbg(2, "unregistering device %s\n", p_dev->dev.bus_id);
442 device_unregister(&p_dev->dev); 410 device_unregister(&p_dev->dev);
443 } 411 }
444 412
@@ -455,6 +423,8 @@ static int pcmcia_device_remove(struct device * dev)
455 p_dev = to_pcmcia_dev(dev); 423 p_dev = to_pcmcia_dev(dev);
456 p_drv = to_pcmcia_drv(dev->driver); 424 p_drv = to_pcmcia_drv(dev->driver);
457 425
426 ds_dbg(1, "removing device %s\n", p_dev->dev.bus_id);
427
458 /* If we're removing the primary module driving a 428 /* If we're removing the primary module driving a
459 * pseudo multi-function card, we need to unbind 429 * pseudo multi-function card, we need to unbind
460 * all devices 430 * all devices
@@ -587,8 +557,10 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
587 557
588 mutex_lock(&device_add_lock); 558 mutex_lock(&device_add_lock);
589 559
590 /* max of 2 devices per card */ 560 ds_dbg(3, "adding device to %d, function %d\n", s->sock, function);
591 if (s->device_count == 2) 561
562 /* max of 4 devices per card */
563 if (s->device_count == 4)
592 goto err_put; 564 goto err_put;
593 565
594 p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL); 566 p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL);
@@ -598,8 +570,6 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
598 p_dev->socket = s; 570 p_dev->socket = s;
599 p_dev->device_no = (s->device_count++); 571 p_dev->device_no = (s->device_count++);
600 p_dev->func = function; 572 p_dev->func = function;
601 if (s->functions <= function)
602 s->functions = function + 1;
603 573
604 p_dev->dev.bus = &pcmcia_bus_type; 574 p_dev->dev.bus = &pcmcia_bus_type;
605 p_dev->dev.parent = s->dev.dev; 575 p_dev->dev.parent = s->dev.dev;
@@ -610,8 +580,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
610 if (!p_dev->devname) 580 if (!p_dev->devname)
611 goto err_free; 581 goto err_free;
612 sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id); 582 sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id);
583 ds_dbg(3, "devname is %s\n", p_dev->devname);
613 584
614 /* compat */
615 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 585 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
616 586
617 /* 587 /*
@@ -631,6 +601,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
631 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 601 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
632 602
633 if (!p_dev->function_config) { 603 if (!p_dev->function_config) {
604 ds_dbg(3, "creating config_t for %s\n", p_dev->dev.bus_id);
634 p_dev->function_config = kzalloc(sizeof(struct config_t), 605 p_dev->function_config = kzalloc(sizeof(struct config_t),
635 GFP_KERNEL); 606 GFP_KERNEL);
636 if (!p_dev->function_config) 607 if (!p_dev->function_config)
@@ -674,11 +645,16 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
674 unsigned int no_funcs, i; 645 unsigned int no_funcs, i;
675 int ret = 0; 646 int ret = 0;
676 647
677 if (!(s->resource_setup_done)) 648 if (!(s->resource_setup_done)) {
649 ds_dbg(3, "no resources available, delaying card_add\n");
678 return -EAGAIN; /* try again, but later... */ 650 return -EAGAIN; /* try again, but later... */
651 }
679 652
680 if (pcmcia_validate_mem(s)) 653 if (pcmcia_validate_mem(s)) {
654 ds_dbg(3, "validating mem resources failed, "
655 "delaying card_add\n");
681 return -EAGAIN; /* try again, but later... */ 656 return -EAGAIN; /* try again, but later... */
657 }
682 658
683 ret = pccard_validate_cis(s, BIND_FN_ALL, &cisinfo); 659 ret = pccard_validate_cis(s, BIND_FN_ALL, &cisinfo);
684 if (ret || !cisinfo.Chains) { 660 if (ret || !cisinfo.Chains) {
@@ -690,6 +666,7 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
690 no_funcs = mfc.nfn; 666 no_funcs = mfc.nfn;
691 else 667 else
692 no_funcs = 1; 668 no_funcs = 1;
669 s->functions = no_funcs;
693 670
694 for (i=0; i < no_funcs; i++) 671 for (i=0; i < no_funcs; i++)
695 pcmcia_device_add(s, i); 672 pcmcia_device_add(s, i);
@@ -698,38 +675,50 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
698} 675}
699 676
700 677
701static void pcmcia_delayed_add_pseudo_device(void *data) 678static void pcmcia_delayed_add_device(struct work_struct *work)
702{ 679{
703 struct pcmcia_socket *s = data; 680 struct pcmcia_socket *s =
704 pcmcia_device_add(s, 0); 681 container_of(work, struct pcmcia_socket, device_add);
682 ds_dbg(1, "adding additional device to %d\n", s->sock);
683 pcmcia_device_add(s, s->pcmcia_state.mfc_pfc);
705 s->pcmcia_state.device_add_pending = 0; 684 s->pcmcia_state.device_add_pending = 0;
685 s->pcmcia_state.mfc_pfc = 0;
706} 686}
707 687
708static int pcmcia_requery(struct device *dev, void * _data) 688static int pcmcia_requery(struct device *dev, void * _data)
709{ 689{
710 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 690 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
711 if (!p_dev->dev.driver) 691 if (!p_dev->dev.driver) {
692 ds_dbg(1, "update device information for %s\n",
693 p_dev->dev.bus_id);
712 pcmcia_device_query(p_dev); 694 pcmcia_device_query(p_dev);
695 }
713 696
714 return 0; 697 return 0;
715} 698}
716 699
717static void pcmcia_bus_rescan(struct pcmcia_socket *skt) 700static void pcmcia_bus_rescan(struct pcmcia_socket *skt, int new_cis)
718{ 701{
719 int no_devices=0; 702 int no_devices = 0;
720 int ret = 0; 703 int ret = 0;
721 unsigned long flags; 704 unsigned long flags;
722 705
723 /* must be called with skt_mutex held */ 706 /* must be called with skt_mutex held */
707 ds_dbg(0, "re-scanning socket %d\n", skt->sock);
708
724 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 709 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
725 if (list_empty(&skt->devices_list)) 710 if (list_empty(&skt->devices_list))
726 no_devices=1; 711 no_devices = 1;
727 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 712 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
728 713
714 /* If this is because of a CIS override, start over */
715 if (new_cis && !no_devices)
716 pcmcia_card_remove(skt, NULL);
717
729 /* if no devices were added for this socket yet because of 718 /* if no devices were added for this socket yet because of
730 * missing resource information or other trouble, we need to 719 * missing resource information or other trouble, we need to
731 * do this now. */ 720 * do this now. */
732 if (no_devices) { 721 if (no_devices || new_cis) {
733 ret = pcmcia_card_add(skt); 722 ret = pcmcia_card_add(skt);
734 if (ret) 723 if (ret)
735 return; 724 return;
@@ -747,6 +736,97 @@ static void pcmcia_bus_rescan(struct pcmcia_socket *skt)
747 printk(KERN_INFO "pcmcia: bus_rescan_devices failed\n"); 736 printk(KERN_INFO "pcmcia: bus_rescan_devices failed\n");
748} 737}
749 738
739#ifdef CONFIG_PCMCIA_LOAD_CIS
740
741/**
742 * pcmcia_load_firmware - load CIS from userspace if device-provided is broken
743 * @dev - the pcmcia device which needs a CIS override
744 * @filename - requested filename in /lib/firmware/
745 *
746 * This uses the in-kernel firmware loading mechanism to use a "fake CIS" if
747 * the one provided by the card is broken. The firmware files reside in
748 * /lib/firmware/ in userspace.
749 */
750static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
751{
752 struct pcmcia_socket *s = dev->socket;
753 const struct firmware *fw;
754 char path[20];
755 int ret = -ENOMEM;
756 int no_funcs;
757 int old_funcs;
758 cisdump_t *cis;
759 cistpl_longlink_mfc_t mfc;
760
761 if (!filename)
762 return -EINVAL;
763
764 ds_dbg(1, "trying to load CIS file %s\n", filename);
765
766 if (strlen(filename) > 14) {
767 printk(KERN_WARNING "pcmcia: CIS filename is too long\n");
768 return -EINVAL;
769 }
770
771 snprintf(path, 20, "%s", filename);
772
773 if (request_firmware(&fw, path, &dev->dev) == 0) {
774 if (fw->size >= CISTPL_MAX_CIS_SIZE) {
775 ret = -EINVAL;
776 printk(KERN_ERR "pcmcia: CIS override is too big\n");
777 goto release;
778 }
779
780 cis = kzalloc(sizeof(cisdump_t), GFP_KERNEL);
781 if (!cis) {
782 ret = -ENOMEM;
783 goto release;
784 }
785
786 cis->Length = fw->size + 1;
787 memcpy(cis->Data, fw->data, fw->size);
788
789 if (!pcmcia_replace_cis(s, cis))
790 ret = 0;
791 else {
792 printk(KERN_ERR "pcmcia: CIS override failed\n");
793 goto release;
794 }
795
796
797 /* update information */
798 pcmcia_device_query(dev);
799
800 /* does this cis override add or remove functions? */
801 old_funcs = s->functions;
802
803 if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc))
804 no_funcs = mfc.nfn;
805 else
806 no_funcs = 1;
807 s->functions = no_funcs;
808
809 if (old_funcs > no_funcs)
810 pcmcia_card_remove(s, dev);
811 else if (no_funcs > old_funcs)
812 pcmcia_add_device_later(s, 1);
813 }
814 release:
815 release_firmware(fw);
816
817 return (ret);
818}
819
820#else /* !CONFIG_PCMCIA_LOAD_CIS */
821
822static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
823{
824 return -ENODEV;
825}
826
827#endif
828
829
750static inline int pcmcia_devmatch(struct pcmcia_device *dev, 830static inline int pcmcia_devmatch(struct pcmcia_device *dev,
751 struct pcmcia_device_id *did) 831 struct pcmcia_device_id *did)
752{ 832{
@@ -813,11 +893,14 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
813 * after it has re-checked that there is no possible module 893 * after it has re-checked that there is no possible module
814 * with a prod_id/manf_id/card_id match. 894 * with a prod_id/manf_id/card_id match.
815 */ 895 */
896 ds_dbg(0, "skipping FUNC_ID match for %s until userspace "
897 "interaction\n", dev->dev.bus_id);
816 if (!dev->allow_func_id_match) 898 if (!dev->allow_func_id_match)
817 return 0; 899 return 0;
818 } 900 }
819 901
820 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { 902 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
903 ds_dbg(0, "device %s needs a fake CIS\n", dev->dev.bus_id);
821 if (!dev->socket->fake_cis) 904 if (!dev->socket->fake_cis)
822 pcmcia_load_firmware(dev, did->cisfile); 905 pcmcia_load_firmware(dev, did->cisfile);
823 906
@@ -847,13 +930,21 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
847 930
848#ifdef CONFIG_PCMCIA_IOCTL 931#ifdef CONFIG_PCMCIA_IOCTL
849 /* matching by cardmgr */ 932 /* matching by cardmgr */
850 if (p_dev->cardmgr == p_drv) 933 if (p_dev->cardmgr == p_drv) {
934 ds_dbg(0, "cardmgr matched %s to %s\n", dev->bus_id,
935 drv->name);
851 return 1; 936 return 1;
937 }
852#endif 938#endif
853 939
854 while (did && did->match_flags) { 940 while (did && did->match_flags) {
855 if (pcmcia_devmatch(p_dev, did)) 941 ds_dbg(3, "trying to match %s to %s\n", dev->bus_id,
942 drv->name);
943 if (pcmcia_devmatch(p_dev, did)) {
944 ds_dbg(0, "matched %s to %s\n", dev->bus_id,
945 drv->name);
856 return 1; 946 return 1;
947 }
857 did++; 948 did++;
858 } 949 }
859 950
@@ -1044,6 +1135,8 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
1044 struct pcmcia_driver *p_drv = NULL; 1135 struct pcmcia_driver *p_drv = NULL;
1045 int ret = 0; 1136 int ret = 0;
1046 1137
1138 ds_dbg(2, "suspending %s\n", dev->bus_id);
1139
1047 if (dev->driver) 1140 if (dev->driver)
1048 p_drv = to_pcmcia_drv(dev->driver); 1141 p_drv = to_pcmcia_drv(dev->driver);
1049 1142
@@ -1052,12 +1145,18 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
1052 1145
1053 if (p_drv->suspend) { 1146 if (p_drv->suspend) {
1054 ret = p_drv->suspend(p_dev); 1147 ret = p_drv->suspend(p_dev);
1055 if (ret) 1148 if (ret) {
1149 printk(KERN_ERR "pcmcia: device %s (driver %s) did "
1150 "not want to go to sleep (%d)\n",
1151 p_dev->devname, p_drv->drv.name, ret);
1056 goto out; 1152 goto out;
1153 }
1057 } 1154 }
1058 1155
1059 if (p_dev->device_no == p_dev->func) 1156 if (p_dev->device_no == p_dev->func) {
1157 ds_dbg(2, "releasing configuration for %s\n", dev->bus_id);
1060 pcmcia_release_configuration(p_dev); 1158 pcmcia_release_configuration(p_dev);
1159 }
1061 1160
1062 out: 1161 out:
1063 if (!ret) 1162 if (!ret)
@@ -1072,6 +1171,8 @@ static int pcmcia_dev_resume(struct device * dev)
1072 struct pcmcia_driver *p_drv = NULL; 1171 struct pcmcia_driver *p_drv = NULL;
1073 int ret = 0; 1172 int ret = 0;
1074 1173
1174 ds_dbg(2, "resuming %s\n", dev->bus_id);
1175
1075 if (dev->driver) 1176 if (dev->driver)
1076 p_drv = to_pcmcia_drv(dev->driver); 1177 p_drv = to_pcmcia_drv(dev->driver);
1077 1178
@@ -1079,6 +1180,7 @@ static int pcmcia_dev_resume(struct device * dev)
1079 goto out; 1180 goto out;
1080 1181
1081 if (p_dev->device_no == p_dev->func) { 1182 if (p_dev->device_no == p_dev->func) {
1183 ds_dbg(2, "requesting configuration for %s\n", dev->bus_id);
1082 ret = pcmcia_request_configuration(p_dev, &p_dev->conf); 1184 ret = pcmcia_request_configuration(p_dev, &p_dev->conf);
1083 if (ret) 1185 if (ret)
1084 goto out; 1186 goto out;
@@ -1120,12 +1222,14 @@ static int pcmcia_bus_resume_callback(struct device *dev, void * _data)
1120 1222
1121static int pcmcia_bus_resume(struct pcmcia_socket *skt) 1223static int pcmcia_bus_resume(struct pcmcia_socket *skt)
1122{ 1224{
1225 ds_dbg(2, "resuming socket %d\n", skt->sock);
1123 bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback); 1226 bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback);
1124 return 0; 1227 return 0;
1125} 1228}
1126 1229
1127static int pcmcia_bus_suspend(struct pcmcia_socket *skt) 1230static int pcmcia_bus_suspend(struct pcmcia_socket *skt)
1128{ 1231{
1232 ds_dbg(2, "suspending socket %d\n", skt->sock);
1129 if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt, 1233 if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt,
1130 pcmcia_bus_suspend_callback)) { 1234 pcmcia_bus_suspend_callback)) {
1131 pcmcia_bus_resume(skt); 1235 pcmcia_bus_resume(skt);
@@ -1246,7 +1350,7 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev,
1246 init_waitqueue_head(&socket->queue); 1350 init_waitqueue_head(&socket->queue);
1247#endif 1351#endif
1248 INIT_LIST_HEAD(&socket->devices_list); 1352 INIT_LIST_HEAD(&socket->devices_list);
1249 INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device, socket); 1353 INIT_WORK(&socket->device_add, pcmcia_delayed_add_device);
1250 memset(&socket->pcmcia_state, 0, sizeof(u8)); 1354 memset(&socket->pcmcia_state, 0, sizeof(u8));
1251 socket->device_count = 0; 1355 socket->device_count = 0;
1252 1356
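/*
 * Illustrative sketch, not from this patch: the ds.c hunks above follow the
 * workqueue API change in which INIT_WORK() no longer takes a data pointer;
 * the handler receives the work_struct and recovers its container.  The
 * struct and function names below are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/kernel.h>

struct demo_socket {
        int sock_no;
        struct work_struct device_add;          /* embedded work item */
};

static void demo_add_worker(struct work_struct *work)
{
        /* recover the enclosing object from the embedded work_struct */
        struct demo_socket *s = container_of(work, struct demo_socket,
                                             device_add);

        printk(KERN_DEBUG "demo: deferred add for socket %d\n", s->sock_no);
}

static void demo_socket_init(struct demo_socket *s)
{
        INIT_WORK(&s->device_add, demo_add_worker); /* no data argument */
        schedule_work(&s->device_add);              /* demo_add_worker() runs later */
}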
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 36fdaa58458c..3c22ac4625c2 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -398,7 +398,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
398static void pcc_interrupt_wrapper(u_long data) 398static void pcc_interrupt_wrapper(u_long data)
399{ 399{
400 debug(3, "m32r_cfc: pcc_interrupt_wrapper:\n"); 400 debug(3, "m32r_cfc: pcc_interrupt_wrapper:\n");
401 pcc_interrupt(0, NULL, NULL); 401 pcc_interrupt(0, NULL);
402 init_timer(&poll_timer); 402 init_timer(&poll_timer);
403 poll_timer.expires = jiffies + poll_interval; 403 poll_timer.expires = jiffies + poll_interval;
404 add_timer(&poll_timer); 404 add_timer(&poll_timer);
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 310ede575caa..d077870c6731 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -594,7 +594,12 @@ static int ds_ioctl(struct inode * inode, struct file * file,
594 594
595 err = ret = 0; 595 err = ret = 0;
596 596
597 if (cmd & IOC_IN) __copy_from_user((char *)buf, uarg, size); 597 if (cmd & IOC_IN) {
598 if (__copy_from_user((char *)buf, uarg, size)) {
599 err = -EFAULT;
600 goto free_out;
601 }
602 }
598 603
599 switch (cmd) { 604 switch (cmd) {
600 case DS_ADJUST_RESOURCE_INFO: 605 case DS_ADJUST_RESOURCE_INFO:
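/*
 * Illustrative sketch, not from this patch: the pcmcia_ioctl.c hunk above
 * checks the return value of __copy_from_user().  copy_from_user() and
 * copy_to_user() return the number of bytes they could NOT copy, so any
 * non-zero result should be turned into -EFAULT.  Names are hypothetical.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_args {
        int offset;
        int length;
};

static int demo_handle_ioctl(void __user *uarg)
{
        struct demo_args args;

        if (copy_from_user(&args, uarg, sizeof(args)))
                return -EFAULT;         /* user buffer was (partly) unreadable */

        args.length = 0;                /* ... act on the arguments ... */

        if (copy_to_user(uarg, &args, sizeof(args)))
                return -EFAULT;         /* user buffer was (partly) unwritable */
        return 0;
}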
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index a70f97fdbbdd..360c24896548 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -581,10 +581,10 @@ static irqreturn_t pd6729_test(int irq, void *dev)
581 return IRQ_HANDLED; 581 return IRQ_HANDLED;
582} 582}
583 583
584static int pd6729_check_irq(int irq, int flags) 584static int pd6729_check_irq(int irq)
585{ 585{
586 if (request_irq(irq, pd6729_test, flags, "x", pd6729_test) != 0) 586 if (request_irq(irq, pd6729_test, IRQF_PROBE_SHARED, "x", pd6729_test)
587 return -1; 587 != 0) return -1;
588 free_irq(irq, pd6729_test); 588 free_irq(irq, pd6729_test);
589 return 0; 589 return 0;
590} 590}
@@ -610,7 +610,7 @@ static u_int __devinit pd6729_isa_scan(void)
610 610
611 /* just find interrupts that aren't in use */ 611 /* just find interrupts that aren't in use */
612 for (i = 0; i < 16; i++) 612 for (i = 0; i < 16; i++)
613 if ((mask0 & (1 << i)) && (pd6729_check_irq(i, 0) == 0)) 613 if ((mask0 & (1 << i)) && (pd6729_check_irq(i) == 0))
614 mask |= (1 << i); 614 mask |= (1 << i);
615 615
616 printk(KERN_INFO "pd6729: ISA irqs = "); 616 printk(KERN_INFO "pd6729: ISA irqs = ");
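/*
 * Illustrative sketch, not from this patch: the pd6729 hunk above probes
 * for free ISA interrupts by briefly requesting each line.  IRQF_PROBE_SHARED
 * marks the request as a probe so a clash with an existing handler is not
 * reported as an error.  Names are hypothetical.
 */
#include <linux/interrupt.h>

static irqreturn_t demo_probe_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;             /* only testing availability */
}

static int demo_irq_is_free(int irq)
{
        if (request_irq(irq, demo_probe_handler, IRQF_PROBE_SHARED,
                        "demo-probe", demo_probe_handler))
                return 0;               /* already in use (or unusable) */

        free_irq(irq, demo_probe_handler);
        return 1;
}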
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 933cd864a5c9..b005602d6b53 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -188,7 +188,7 @@ static ssize_t pccard_store_resource(struct class_device *dev, const char *buf,
188 (s->state & SOCKET_PRESENT) && 188 (s->state & SOCKET_PRESENT) &&
189 !(s->state & SOCKET_CARDBUS)) { 189 !(s->state & SOCKET_CARDBUS)) {
190 if (try_module_get(s->callback->owner)) { 190 if (try_module_get(s->callback->owner)) {
191 s->callback->requery(s); 191 s->callback->requery(s, 0);
192 module_put(s->callback->owner); 192 module_put(s->callback->owner);
193 } 193 }
194 } 194 }
@@ -325,7 +325,7 @@ static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, siz
325 if ((s->callback) && (s->state & SOCKET_PRESENT) && 325 if ((s->callback) && (s->state & SOCKET_PRESENT) &&
326 !(s->state & SOCKET_CARDBUS)) { 326 !(s->state & SOCKET_CARDBUS)) {
327 if (try_module_get(s->callback->owner)) { 327 if (try_module_get(s->callback->owner)) {
328 s->callback->requery(s); 328 s->callback->requery(s, 1);
329 module_put(s->callback->owner); 329 module_put(s->callback->owner);
330 } 330 }
331 } 331 }
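/*
 * Illustrative sketch, not from this patch: the socket_sysfs.c hunks above
 * invoke the requery() callback only after pinning the module that provides
 * it, so that module cannot be unloaded while the callback runs.  Names are
 * hypothetical.
 */
#include <linux/module.h>

struct demo_callback {
        struct module *owner;
        void (*requery)(void *ctx, int new_cis);
};

static void demo_invoke_requery(struct demo_callback *cb, void *ctx, int new_cis)
{
        if (!try_module_get(cb->owner))
                return;                 /* provider is going away: skip */
        cb->requery(ctx, new_cis);
        module_put(cb->owner);          /* drop the temporary reference */
}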
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 814b9e1873f5..828b329e08e0 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -53,9 +53,10 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
53 * Routine to poll RTC seconds field for change as often as possible, 53 * Routine to poll RTC seconds field for change as often as possible,
54 * after first RTC_UIE use timer to reduce polling 54 * after first RTC_UIE use timer to reduce polling
55 */ 55 */
56static void rtc_uie_task(void *data) 56static void rtc_uie_task(struct work_struct *work)
57{ 57{
58 struct rtc_device *rtc = data; 58 struct rtc_device *rtc =
59 container_of(work, struct rtc_device, uie_task);
59 struct rtc_time tm; 60 struct rtc_time tm;
60 int num = 0; 61 int num = 0;
61 int err; 62 int err;
@@ -411,7 +412,7 @@ static int rtc_dev_add_device(struct class_device *class_dev,
411 spin_lock_init(&rtc->irq_lock); 412 spin_lock_init(&rtc->irq_lock);
412 init_waitqueue_head(&rtc->irq_queue); 413 init_waitqueue_head(&rtc->irq_queue);
413#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 414#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
414 INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc); 415 INIT_WORK(&rtc->uie_task, rtc_uie_task);
415 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); 416 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
416#endif 417#endif
417 418
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 562432d017b0..335a25540c08 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -622,8 +622,10 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
622 dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE); 622 dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
623 /* restore the old result if the request sense was 623 /* restore the old result if the request sense was
624 * successful */ 624 * successful */
625 if(result == 0) 625 if (result == 0)
626 result = cmnd[7]; 626 result = cmnd[7];
627 /* restore the original length */
628 SCp->cmd_len = cmnd[8];
627 } else 629 } else
628 NCR_700_unmap(hostdata, SCp, slot); 630 NCR_700_unmap(hostdata, SCp, slot);
629 631
@@ -1007,6 +1009,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
1007 * of the command */ 1009 * of the command */
1008 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC; 1010 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1009 cmnd[7] = hostdata->status[0]; 1011 cmnd[7] = hostdata->status[0];
1012 cmnd[8] = SCp->cmd_len;
1013 SCp->cmd_len = 6; /* command length for
1014 * REQUEST_SENSE */
1010 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE); 1015 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1011 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE); 1016 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1012 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer)); 1017 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index cdd033724786..3075204915c8 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2186,21 +2186,21 @@ static int __init BusLogic_init(void)
2186 2186
2187 if (BusLogic_ProbeOptions.NoProbe) 2187 if (BusLogic_ProbeOptions.NoProbe)
2188 return -ENODEV; 2188 return -ENODEV;
2189 BusLogic_ProbeInfoList = (struct BusLogic_ProbeInfo *) 2189 BusLogic_ProbeInfoList =
2190 kmalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_ATOMIC); 2190 kzalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_KERNEL);
2191 if (BusLogic_ProbeInfoList == NULL) { 2191 if (BusLogic_ProbeInfoList == NULL) {
2192 BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL); 2192 BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
2193 return -ENOMEM; 2193 return -ENOMEM;
2194 } 2194 }
2195 memset(BusLogic_ProbeInfoList, 0, BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo)); 2195
2196 PrototypeHostAdapter = (struct BusLogic_HostAdapter *) 2196 PrototypeHostAdapter =
2197 kmalloc(sizeof(struct BusLogic_HostAdapter), GFP_ATOMIC); 2197 kzalloc(sizeof(struct BusLogic_HostAdapter), GFP_KERNEL);
2198 if (PrototypeHostAdapter == NULL) { 2198 if (PrototypeHostAdapter == NULL) {
2199 kfree(BusLogic_ProbeInfoList); 2199 kfree(BusLogic_ProbeInfoList);
2200 BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL); 2200 BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL);
2201 return -ENOMEM; 2201 return -ENOMEM;
2202 } 2202 }
2203 memset(PrototypeHostAdapter, 0, sizeof(struct BusLogic_HostAdapter)); 2203
2204#ifdef MODULE 2204#ifdef MODULE
2205 if (BusLogic != NULL) 2205 if (BusLogic != NULL)
2206 BusLogic_Setup(BusLogic); 2206 BusLogic_Setup(BusLogic);
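/*
 * Illustrative sketch, not from this patch: the BusLogic hunk above folds a
 * kmalloc()+memset() pair into kzalloc(), which returns zeroed memory, and
 * switches GFP_ATOMIC to GFP_KERNEL because the init path may sleep.  The
 * struct name is hypothetical.
 */
#include <linux/slab.h>

struct demo_probe_info {
        int bus;
        int slot;
};

static struct demo_probe_info *demo_alloc_probe_info(unsigned int count)
{
        /* allocate and zero-fill in one call; may sleep */
        return kzalloc(count * sizeof(struct demo_probe_info), GFP_KERNEL);
}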
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9540eb8efdcb..69569096dae5 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -29,6 +29,13 @@ config SCSI
29 However, do not compile this as a module if your root file system 29 However, do not compile this as a module if your root file system
30 (the one containing the directory /) is located on a SCSI device. 30 (the one containing the directory /) is located on a SCSI device.
31 31
32config SCSI_TGT
33 tristate "SCSI target support"
34 depends on SCSI && EXPERIMENTAL
35 ---help---
36 If you want to use SCSI target mode drivers enable this option.
37 If you choose M, the module will be called scsi_tgt.
38
32config SCSI_NETLINK 39config SCSI_NETLINK
33 bool 40 bool
34 default n 41 default n
@@ -216,6 +223,23 @@ config SCSI_LOGGING
216 there should be no noticeable performance impact as long as you have 223 there should be no noticeable performance impact as long as you have
217 logging turned off. 224 logging turned off.
218 225
226config SCSI_SCAN_ASYNC
227 bool "Asynchronous SCSI scanning"
228 depends on SCSI
229 help
230 The SCSI subsystem can probe for devices while the rest of the
231 system continues booting, and even probe devices on different
232 busses in parallel, leading to a significant speed-up.
233 If you have built SCSI as modules, enabling this option can
234 be a problem as the devices may not have been found by the
235 time your system expects them to have been. You can load the
236 scsi_wait_scan module to ensure that all scans have completed.
237 If you build your SCSI drivers into the kernel, then everything
238 will work fine if you say Y here.
239
240 You can override this choice by specifying scsi_mod.scan="sync"
241 or "async" on the kernel's command line.
242
219menu "SCSI Transports" 243menu "SCSI Transports"
220 depends on SCSI 244 depends on SCSI
221 245
@@ -797,6 +821,20 @@ config SCSI_IBMVSCSI
797 To compile this driver as a module, choose M here: the 821 To compile this driver as a module, choose M here: the
798 module will be called ibmvscsic. 822 module will be called ibmvscsic.
799 823
824config SCSI_IBMVSCSIS
825 tristate "IBM Virtual SCSI Server support"
826 depends on PPC_PSERIES && SCSI_TGT && SCSI_SRP
827 help
828 This is the SRP target driver for IBM pSeries virtual environments.
829
 830	  The userspace component needed to initialize the driver, and its
 831	  documentation, can be found at:
832
833 http://stgt.berlios.de/
834
835 To compile this driver as a module, choose M here: the
836 module will be called ibmvstgt.
837
800config SCSI_INITIO 838config SCSI_INITIO
801 tristate "Initio 9100U(W) support" 839 tristate "Initio 9100U(W) support"
802 depends on PCI && SCSI 840 depends on PCI && SCSI
@@ -944,8 +982,13 @@ config SCSI_STEX
944 tristate "Promise SuperTrak EX Series support" 982 tristate "Promise SuperTrak EX Series support"
945 depends on PCI && SCSI 983 depends on PCI && SCSI
946 ---help--- 984 ---help---
947 This driver supports Promise SuperTrak EX8350/8300/16350/16300 985 This driver supports Promise SuperTrak EX series storage controllers.
948 Storage controllers. 986
 987	  Promise provides a Linux RAID configuration utility for these
 988	  controllers. Please visit <http://www.promise.com> to download it.
989
990 To compile this driver as a module, choose M here: the
991 module will be called stex.
949 992
950config SCSI_SYM53C8XX_2 993config SCSI_SYM53C8XX_2
951 tristate "SYM53C8XX Version 2 SCSI support" 994 tristate "SYM53C8XX Version 2 SCSI support"
@@ -1026,6 +1069,7 @@ config SCSI_IPR
1026config SCSI_IPR_TRACE 1069config SCSI_IPR_TRACE
1027 bool "enable driver internal trace" 1070 bool "enable driver internal trace"
1028 depends on SCSI_IPR 1071 depends on SCSI_IPR
1072 default y
1029 help 1073 help
1030 If you say Y here, the driver will trace all commands issued 1074 If you say Y here, the driver will trace all commands issued
1031 to the adapter. Performance impact is minimal. Trace can be 1075 to the adapter. Performance impact is minimal. Trace can be
@@ -1034,6 +1078,7 @@ config SCSI_IPR_TRACE
1034config SCSI_IPR_DUMP 1078config SCSI_IPR_DUMP
1035 bool "enable adapter dump support" 1079 bool "enable adapter dump support"
1036 depends on SCSI_IPR 1080 depends on SCSI_IPR
1081 default y
1037 help 1082 help
1038 If you say Y here, the driver will support adapter crash dump. 1083 If you say Y here, the driver will support adapter crash dump.
1039 If you enable this support, the iprdump daemon can be used 1084 If you enable this support, the iprdump daemon can be used
@@ -1734,6 +1779,16 @@ config ZFCP
1734 called zfcp. If you want to compile it as a module, say M here 1779 called zfcp. If you want to compile it as a module, say M here
1735 and read <file:Documentation/modules.txt>. 1780 and read <file:Documentation/modules.txt>.
1736 1781
1782config SCSI_SRP
1783 tristate "SCSI RDMA Protocol helper library"
1784 depends on SCSI && PCI
1785 select SCSI_TGT
1786 help
1787 If you wish to use SRP target drivers, say Y.
1788
1789 To compile this driver as a module, choose M here: the
1790 module will be called libsrp.
1791
1737endmenu 1792endmenu
1738 1793
1739source "drivers/scsi/pcmcia/Kconfig" 1794source "drivers/scsi/pcmcia/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index bcca39c3bcbf..bd7c9888f7f4 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -21,6 +21,7 @@ CFLAGS_seagate.o = -DARBITRATE -DPARITY -DSEAGATE_USE_ASM
21subdir-$(CONFIG_PCMCIA) += pcmcia 21subdir-$(CONFIG_PCMCIA) += pcmcia
22 22
23obj-$(CONFIG_SCSI) += scsi_mod.o 23obj-$(CONFIG_SCSI) += scsi_mod.o
24obj-$(CONFIG_SCSI_TGT) += scsi_tgt.o
24 25
25obj-$(CONFIG_RAID_ATTRS) += raid_class.o 26obj-$(CONFIG_RAID_ATTRS) += raid_class.o
26 27
@@ -125,7 +126,9 @@ obj-$(CONFIG_SCSI_FCAL) += fcal.o
125obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o 126obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
126obj-$(CONFIG_SCSI_NSP32) += nsp32.o 127obj-$(CONFIG_SCSI_NSP32) += nsp32.o
127obj-$(CONFIG_SCSI_IPR) += ipr.o 128obj-$(CONFIG_SCSI_IPR) += ipr.o
129obj-$(CONFIG_SCSI_SRP) += libsrp.o
128obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ 130obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
131obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
129obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 132obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
130obj-$(CONFIG_SCSI_STEX) += stex.o 133obj-$(CONFIG_SCSI_STEX) += stex.o
131 134
@@ -141,6 +144,8 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o
141# This goes last, so that "real" scsi devices probe earlier 144# This goes last, so that "real" scsi devices probe earlier
142obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o 145obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
143 146
147obj-$(CONFIG_SCSI) += scsi_wait_scan.o
148
144scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \ 149scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
145 scsicam.o scsi_error.o scsi_lib.o \ 150 scsicam.o scsi_error.o scsi_lib.o \
146 scsi_scan.o scsi_sysfs.o \ 151 scsi_scan.o scsi_sysfs.o \
@@ -149,6 +154,8 @@ scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
149scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o 154scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
150scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 155scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
151 156
157scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
158
152sd_mod-objs := sd.o 159sd_mod-objs := sd.o
153sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o 160sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
154ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ 161ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index a6aa91072880..bb3cb3360541 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -849,7 +849,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
849 hostdata->issue_queue = NULL; 849 hostdata->issue_queue = NULL;
850 hostdata->disconnected_queue = NULL; 850 hostdata->disconnected_queue = NULL;
851 851
852 INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata); 852 INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
853 853
854#ifdef NCR5380_STATS 854#ifdef NCR5380_STATS
855 for (i = 0; i < 8; ++i) { 855 for (i = 0; i < 8; ++i) {
@@ -1016,7 +1016,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1016 1016
1017 /* Run the coroutine if it isn't already running. */ 1017 /* Run the coroutine if it isn't already running. */
1018 /* Kick off command processing */ 1018 /* Kick off command processing */
1019 schedule_work(&hostdata->coroutine); 1019 schedule_delayed_work(&hostdata->coroutine, 0);
1020 return 0; 1020 return 0;
1021} 1021}
1022 1022
@@ -1033,9 +1033,10 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1033 * host lock and called routines may take the isa dma lock. 1033 * host lock and called routines may take the isa dma lock.
1034 */ 1034 */
1035 1035
1036static void NCR5380_main(void *p) 1036static void NCR5380_main(struct work_struct *work)
1037{ 1037{
1038 struct NCR5380_hostdata *hostdata = p; 1038 struct NCR5380_hostdata *hostdata =
1039 container_of(work, struct NCR5380_hostdata, coroutine.work);
1039 struct Scsi_Host *instance = hostdata->host; 1040 struct Scsi_Host *instance = hostdata->host;
1040 Scsi_Cmnd *tmp, *prev; 1041 Scsi_Cmnd *tmp, *prev;
1041 int done; 1042 int done;
@@ -1221,7 +1222,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1221 } /* if BASR_IRQ */ 1222 } /* if BASR_IRQ */
1222 spin_unlock_irqrestore(instance->host_lock, flags); 1223 spin_unlock_irqrestore(instance->host_lock, flags);
1223 if(!done) 1224 if(!done)
1224 schedule_work(&hostdata->coroutine); 1225 schedule_delayed_work(&hostdata->coroutine, 0);
1225 } while (!done); 1226 } while (!done);
1226 return IRQ_HANDLED; 1227 return IRQ_HANDLED;
1227} 1228}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 1bc73de496b0..713a108c02ef 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -271,7 +271,7 @@ struct NCR5380_hostdata {
271 unsigned long time_expires; /* in jiffies, set prior to sleeping */ 271 unsigned long time_expires; /* in jiffies, set prior to sleeping */
272 int select_time; /* timer in select for target response */ 272 int select_time; /* timer in select for target response */
273 volatile Scsi_Cmnd *selecting; 273 volatile Scsi_Cmnd *selecting;
274 struct work_struct coroutine; /* our co-routine */ 274 struct delayed_work coroutine; /* our co-routine */
275#ifdef NCR5380_STATS 275#ifdef NCR5380_STATS
276 unsigned timebase; /* Base for time calcs */ 276 unsigned timebase; /* Base for time calcs */
277 long time_read[8]; /* time to do reads */ 277 long time_read[8]; /* time to do reads */
@@ -298,7 +298,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
298#ifndef DONT_USE_INTR 298#ifndef DONT_USE_INTR
299static irqreturn_t NCR5380_intr(int irq, void *dev_id); 299static irqreturn_t NCR5380_intr(int irq, void *dev_id);
300#endif 300#endif
301static void NCR5380_main(void *ptr); 301static void NCR5380_main(struct work_struct *work);
302static void NCR5380_print_options(struct Scsi_Host *instance); 302static void NCR5380_print_options(struct Scsi_Host *instance);
303#ifdef NDEBUG 303#ifdef NDEBUG
304static void NCR5380_print_phase(struct Scsi_Host *instance); 304static void NCR5380_print_phase(struct Scsi_Host *instance);
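/*
 * Illustrative sketch, not from this patch: the NCR5380 hunks above convert
 * the driver coroutine from work_struct to delayed_work.  The handler still
 * takes a work_struct pointer and reaches its container through the embedded
 * .work member; scheduling takes a delay in jiffies.  Names are hypothetical.
 */
#include <linux/workqueue.h>

struct demo_hostdata {
        int busy;
        struct delayed_work coroutine;
};

static void demo_main(struct work_struct *work)
{
        struct demo_hostdata *hd =
                container_of(work, struct demo_hostdata, coroutine.work);

        hd->busy = 0;                   /* ... real processing goes here ... */
}

static void demo_host_init(struct demo_hostdata *hd)
{
        INIT_DELAYED_WORK(&hd->coroutine, demo_main);
}

static void demo_host_kick(struct demo_hostdata *hd)
{
        schedule_delayed_work(&hd->coroutine, 0);   /* 0 jiffies = run soon */
}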
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index d4613815f685..8578555d58fd 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -220,9 +220,11 @@ static void *addresses[] = {
220static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 }; 220static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 };
221#define PORT_COUNT ARRAY_SIZE(ports) 221#define PORT_COUNT ARRAY_SIZE(ports)
222 222
223#ifndef MODULE
223/* possible interrupt channels */ 224/* possible interrupt channels */
224static unsigned short intrs[] = { 10, 11, 12, 15 }; 225static unsigned short intrs[] = { 10, 11, 12, 15 };
225#define INTR_COUNT ARRAY_SIZE(intrs) 226#define INTR_COUNT ARRAY_SIZE(intrs)
227#endif /* !MODULE */
226 228
227/* signatures for NCR 53c406a based controllers */ 229/* signatures for NCR 53c406a based controllers */
228#if USE_BIOS 230#if USE_BIOS
@@ -605,6 +607,7 @@ static int NCR53c406a_release(struct Scsi_Host *shost)
605 return 0; 607 return 0;
606} 608}
607 609
610#ifndef MODULE
608/* called from init/main.c */ 611/* called from init/main.c */
609static int __init NCR53c406a_setup(char *str) 612static int __init NCR53c406a_setup(char *str)
610{ 613{
@@ -661,6 +664,8 @@ static int __init NCR53c406a_setup(char *str)
661 664
662__setup("ncr53c406a=", NCR53c406a_setup); 665__setup("ncr53c406a=", NCR53c406a_setup);
663 666
667#endif /* !MODULE */
668
664static const char *NCR53c406a_info(struct Scsi_Host *SChost) 669static const char *NCR53c406a_info(struct Scsi_Host *SChost)
665{ 670{
666 DEB(printk("NCR53c406a_info called\n")); 671 DEB(printk("NCR53c406a_info called\n"));
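/*
 * Illustrative sketch, not from this patch: the NCR53c406a hunks above wrap
 * the __setup() handler in #ifndef MODULE.  "name=value" options on the
 * kernel command line only reach __setup() handlers in built-in code, so in
 * a modular build the parser would just be dead code.  Names are
 * hypothetical.
 */
#include <linux/init.h>
#include <linux/kernel.h>

#ifndef MODULE
static int demo_irq = 10;

static int __init demo_setup(char *str)
{
        int val;

        if (get_option(&str, &val))     /* parse "demo=<irq>" */
                demo_irq = val;
        return 1;
}

__setup("demo=", demo_setup);
#endif /* !MODULE */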
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index eb3ed91bac79..4f8b4c53d435 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -11,8 +11,8 @@
11 *----------------------------------------------------------------------------*/ 11 *----------------------------------------------------------------------------*/
12 12
13#ifndef AAC_DRIVER_BUILD 13#ifndef AAC_DRIVER_BUILD
14# define AAC_DRIVER_BUILD 2409 14# define AAC_DRIVER_BUILD 2423
15# define AAC_DRIVER_BRANCH "-mh2" 15# define AAC_DRIVER_BRANCH "-mh3"
16#endif 16#endif
17#define MAXIMUM_NUM_CONTAINERS 32 17#define MAXIMUM_NUM_CONTAINERS 32
18 18
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 19e42ac07cb2..4893a6d06a33 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -518,6 +518,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
518 */ 518 */
519 unsigned long count = 36000000L; /* 3 minutes */ 519 unsigned long count = 36000000L; /* 3 minutes */
520 while (down_trylock(&fibptr->event_wait)) { 520 while (down_trylock(&fibptr->event_wait)) {
521 int blink;
521 if (--count == 0) { 522 if (--count == 0) {
522 spin_lock_irqsave(q->lock, qflags); 523 spin_lock_irqsave(q->lock, qflags);
523 q->numpending--; 524 q->numpending--;
@@ -530,6 +531,14 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
530 } 531 }
531 return -ETIMEDOUT; 532 return -ETIMEDOUT;
532 } 533 }
534 if ((blink = aac_adapter_check_health(dev)) > 0) {
535 if (wait == -1) {
536 printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
537 "Usually a result of a serious unrecoverable hardware problem\n",
538 blink);
539 }
540 return -EFAULT;
541 }
533 udelay(5); 542 udelay(5);
534 } 543 }
535 } else if (down_interruptible(&fibptr->event_wait)) { 544 } else if (down_interruptible(&fibptr->event_wait)) {
@@ -1093,6 +1102,20 @@ static int _aac_reset_adapter(struct aac_dev *aac)
1093 goto out; 1102 goto out;
1094 } 1103 }
1095 1104
1105 /*
1106 * Loop through the fibs, close the synchronous FIBS
1107 */
1108 for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1109 struct fib *fib = &aac->fibs[index];
1110 if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1111 (fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) {
1112 unsigned long flagv;
1113 spin_lock_irqsave(&fib->event_lock, flagv);
1114 up(&fib->event_wait);
1115 spin_unlock_irqrestore(&fib->event_lock, flagv);
1116 schedule();
1117 }
1118 }
1096 index = aac->cardtype; 1119 index = aac->cardtype;
1097 1120
1098 /* 1121 /*
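/*
 * Illustrative sketch, not from this patch: the aacraid hunk above polls a
 * semaphore with down_trylock() so the wait loop can give up early when the
 * adapter reports a fault instead of spinning until the timeout.
 * down_trylock() returns 0 once the semaphore has been up()'ed.  Names and
 * the health-check helper are hypothetical.
 */
#include <linux/semaphore.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int demo_check_health(void)
{
        return 0;       /* stand-in: a real driver would query the adapter */
}

static int demo_wait_event(struct semaphore *event, unsigned long spins)
{
        while (down_trylock(event)) {           /* not signalled yet */
                if (--spins == 0)
                        return -ETIMEDOUT;
                if (demo_check_health() > 0)
                        return -EFAULT;         /* adapter faulted: give up */
                udelay(5);                      /* brief pause between polls */
        }
        return 0;                               /* woken by up(event) */
}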
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 306f46b85a55..0cec742d12e9 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1443,7 +1443,7 @@ static struct work_struct aha152x_tq;
1443 * Run service completions on the card with interrupts enabled. 1443 * Run service completions on the card with interrupts enabled.
1444 * 1444 *
1445 */ 1445 */
1446static void run(void) 1446static void run(struct work_struct *work)
1447{ 1447{
1448 struct aha152x_hostdata *hd; 1448 struct aha152x_hostdata *hd;
1449 1449
@@ -1499,7 +1499,7 @@ static irqreturn_t intr(int irqno, void *dev_id)
1499 HOSTDATA(shpnt)->service=1; 1499 HOSTDATA(shpnt)->service=1;
1500 1500
1501 /* Poke the BH handler */ 1501 /* Poke the BH handler */
1502 INIT_WORK(&aha152x_tq, (void *) run, NULL); 1502 INIT_WORK(&aha152x_tq, run);
1503 schedule_work(&aha152x_tq); 1503 schedule_work(&aha152x_tq);
1504 } 1504 }
1505 DO_UNLOCK(flags); 1505 DO_UNLOCK(flags);
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index c3c38a7e8d32..d7af9c63a04d 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -586,7 +586,7 @@ static struct scsi_host_template aha1740_template = {
586 586
587static int aha1740_probe (struct device *dev) 587static int aha1740_probe (struct device *dev)
588{ 588{
589 int slotbase; 589 int slotbase, rc;
590 unsigned int irq_level, irq_type, translation; 590 unsigned int irq_level, irq_type, translation;
591 struct Scsi_Host *shpnt; 591 struct Scsi_Host *shpnt;
592 struct aha1740_hostdata *host; 592 struct aha1740_hostdata *host;
@@ -641,10 +641,16 @@ static int aha1740_probe (struct device *dev)
641 } 641 }
642 642
643 eisa_set_drvdata (edev, shpnt); 643 eisa_set_drvdata (edev, shpnt);
644 scsi_add_host (shpnt, dev); /* XXX handle failure */ 644
645 rc = scsi_add_host (shpnt, dev);
646 if (rc)
647 goto err_irq;
648
645 scsi_scan_host (shpnt); 649 scsi_scan_host (shpnt);
646 return 0; 650 return 0;
647 651
652 err_irq:
653 free_irq(irq_level, shpnt);
648 err_unmap: 654 err_unmap:
649 dma_unmap_single (&edev->dev, host->ecb_dma_addr, 655 dma_unmap_single (&edev->dev, host->ecb_dma_addr,
650 sizeof (host->ecb), DMA_BIDIRECTIONAL); 656 sizeof (host->ecb), DMA_BIDIRECTIONAL);
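/*
 * Illustrative sketch, not from this patch: the aha1740 hunk above stops
 * ignoring the return value of scsi_add_host(); on failure the probe path
 * now unwinds what it had already set up (here the IRQ) before returning.
 * Names and the exact unwind steps are hypothetical.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

static int demo_register_host(struct Scsi_Host *shpnt, struct device *dev,
                              unsigned int irq_level)
{
        int rc;

        rc = scsi_add_host(shpnt, dev);
        if (rc)
                goto err_irq;           /* registration failed: undo earlier steps */

        scsi_scan_host(shpnt);
        return 0;

 err_irq:
        free_irq(irq_level, shpnt);
        return rc;
}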
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 2001fe890e71..1a3ab6aa856b 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -62,6 +62,7 @@ static struct pci_device_id ahd_linux_pci_id_table[] = {
62 /* aic7901 based controllers */ 62 /* aic7901 based controllers */
63 ID(ID_AHA_29320A), 63 ID(ID_AHA_29320A),
64 ID(ID_AHA_29320ALP), 64 ID(ID_AHA_29320ALP),
65 ID(ID_AHA_29320LPE),
65 /* aic7902 based controllers */ 66 /* aic7902 based controllers */
66 ID(ID_AHA_29320), 67 ID(ID_AHA_29320),
67 ID(ID_AHA_29320B), 68 ID(ID_AHA_29320B),
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c07735819cd1..2cf7bb3123f0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -109,7 +109,13 @@ static struct ahd_pci_identity ahd_pci_ident_table [] =
109 { 109 {
110 ID_AHA_29320ALP, 110 ID_AHA_29320ALP,
111 ID_ALL_MASK, 111 ID_ALL_MASK,
112 "Adaptec 29320ALP Ultra320 SCSI adapter", 112 "Adaptec 29320ALP PCIx Ultra320 SCSI adapter",
113 ahd_aic7901_setup
114 },
115 {
116 ID_AHA_29320LPE,
117 ID_ALL_MASK,
118 "Adaptec 29320LPE PCIe Ultra320 SCSI adapter",
113 ahd_aic7901_setup 119 ahd_aic7901_setup
114 }, 120 },
115 /* aic7901A based controllers */ 121 /* aic7901A based controllers */
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.h b/drivers/scsi/aic7xxx/aic79xx_pci.h
index da45153668c7..16b7c70a673c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.h
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.h
@@ -51,6 +51,7 @@
51#define ID_AIC7901 0x800F9005FFFF9005ull 51#define ID_AIC7901 0x800F9005FFFF9005ull
52#define ID_AHA_29320A 0x8000900500609005ull 52#define ID_AHA_29320A 0x8000900500609005ull
53#define ID_AHA_29320ALP 0x8017900500449005ull 53#define ID_AHA_29320ALP 0x8017900500449005ull
54#define ID_AHA_29320LPE 0x8017900500459005ull
54 55
55#define ID_AIC7901A 0x801E9005FFFF9005ull 56#define ID_AIC7901A 0x801E9005FFFF9005ull
56#define ID_AHA_29320LP 0x8014900500449005ull 57#define ID_AHA_29320LP 0x8014900500449005ull
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 57c5ba4043f2..42302ef05ee5 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -724,6 +724,15 @@ static void asd_free_queues(struct asd_ha_struct *asd_ha)
724 724
725 list_for_each_safe(pos, n, &pending) { 725 list_for_each_safe(pos, n, &pending) {
726 struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list); 726 struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
727 /*
728 * Delete unexpired ascb timers. This may happen if we issue
729 * a CONTROL PHY scb to an adapter and rmmod before the scb
730 * times out. Apparently we don't wait for the CONTROL PHY
731 * to complete, so it doesn't matter if we kill the timer.
732 */
733 del_timer_sync(&ascb->timer);
734 WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
735
727 list_del_init(pos); 736 list_del_init(pos);
728 ASD_DPRINTK("freeing from pending\n"); 737 ASD_DPRINTK("freeing from pending\n");
729 asd_ascb_free(ascb); 738 asd_ascb_free(ascb);
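/*
 * Illustrative sketch, not from this patch: the aic94xx hunk above calls
 * del_timer_sync() on each pending request before freeing it, so an armed
 * timer cannot fire after its memory is gone (e.g. during rmmod).
 * del_timer_sync() also waits for a handler already running on another CPU.
 * Names are hypothetical.
 */
#include <linux/timer.h>
#include <linux/slab.h>

struct demo_request {
        struct timer_list timer;
        int tc_index;
};

static void demo_free_request(struct demo_request *req)
{
        del_timer_sync(&req->timer);    /* disarm, and wait out a running handler */
        kfree(req);                     /* the timer can no longer touch req */
}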
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index b15caf1c8fa2..75ed6b0569d1 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <scsi/scsi_host.h>
28 29
29#include "aic94xx.h" 30#include "aic94xx.h"
30#include "aic94xx_reg.h" 31#include "aic94xx_reg.h"
@@ -412,6 +413,40 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
412 } 413 }
413} 414}
414 415
416/* hard reset a phy later */
417static void do_phy_reset_later(struct work_struct *work)
418{
419 struct sas_phy *sas_phy =
420 container_of(work, struct sas_phy, reset_work);
421 int error;
422
423 ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
424 sas_phy->identify.phy_identifier);
425 /* Reset device port */
426 error = sas_phy_reset(sas_phy, 1);
427 if (error)
428 ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
429 __FUNCTION__, sas_phy->identify.phy_identifier, error);
430}
431
432static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
433{
434 INIT_WORK(&sas_phy->reset_work, do_phy_reset_later);
435 queue_work(shost->work_q, &sas_phy->reset_work);
436}
437
438/* start up the ABORT TASK tmf... */
439static void task_kill_later(struct asd_ascb *ascb)
440{
441 struct asd_ha_struct *asd_ha = ascb->ha;
442 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
443 struct Scsi_Host *shost = sas_ha->core.shost;
444 struct sas_task *task = ascb->uldd_task;
445
446 INIT_WORK(&task->abort_work, sas_task_abort);
447 queue_work(shost->work_q, &task->abort_work);
448}
449
415static void escb_tasklet_complete(struct asd_ascb *ascb, 450static void escb_tasklet_complete(struct asd_ascb *ascb,
416 struct done_list_struct *dl) 451 struct done_list_struct *dl)
417{ 452{
@@ -439,6 +474,74 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
439 ascb->scb->header.opcode); 474 ascb->scb->header.opcode);
440 } 475 }
441 476
477 /* Catch these before we mask off the sb_opcode bits */
478 switch (sb_opcode) {
479 case REQ_TASK_ABORT: {
480 struct asd_ascb *a, *b;
481 u16 tc_abort;
482
483 tc_abort = *((u16*)(&dl->status_block[1]));
484 tc_abort = le16_to_cpu(tc_abort);
485
486 ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
487 __FUNCTION__, dl->status_block[3]);
488
489 /* Find the pending task and abort it. */
490 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list)
491 if (a->tc_index == tc_abort) {
492 task_kill_later(a);
493 break;
494 }
495 goto out;
496 }
497 case REQ_DEVICE_RESET: {
498 struct Scsi_Host *shost = sas_ha->core.shost;
499 struct sas_phy *dev_phy;
500 struct asd_ascb *a;
501 u16 conn_handle;
502
503 conn_handle = *((u16*)(&dl->status_block[1]));
504 conn_handle = le16_to_cpu(conn_handle);
505
506 ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
507 dl->status_block[3]);
508
509 /* Kill all pending tasks and reset the device */
510 dev_phy = NULL;
511 list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
512 struct sas_task *task;
513 struct domain_device *dev;
514 u16 x;
515
516 task = a->uldd_task;
517 if (!task)
518 continue;
519 dev = task->dev;
520
521 x = (unsigned long)dev->lldd_dev;
522 if (x == conn_handle) {
523 dev_phy = dev->port->phy;
524 task_kill_later(a);
525 }
526 }
527
528 /* Reset device port */
529 if (!dev_phy) {
530 ASD_DPRINTK("%s: No pending commands; can't reset.\n",
531 __FUNCTION__);
532 goto out;
533 }
534 phy_reset_later(dev_phy, shost);
535 goto out;
536 }
537 case SIGNAL_NCQ_ERROR:
538 ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
539 goto out;
540 case CLEAR_NCQ_ERROR:
541 ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
542 goto out;
543 }
544
442 sb_opcode &= ~DL_PHY_MASK; 545 sb_opcode &= ~DL_PHY_MASK;
443 546
444 switch (sb_opcode) { 547 switch (sb_opcode) {
@@ -469,22 +572,6 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
469 asd_deform_port(asd_ha, phy); 572 asd_deform_port(asd_ha, phy);
470 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT); 573 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
471 break; 574 break;
472 case REQ_TASK_ABORT:
473 ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
474 phy_id);
475 break;
476 case REQ_DEVICE_RESET:
477 ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
478 phy_id);
479 break;
480 case SIGNAL_NCQ_ERROR:
481 ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
482 phy_id);
483 break;
484 case CLEAR_NCQ_ERROR:
485 ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
486 phy_id);
487 break;
488 default: 575 default:
489 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__, 576 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
490 phy_id, sb_opcode); 577 phy_id, sb_opcode);
@@ -504,7 +591,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
504 591
505 break; 592 break;
506 } 593 }
507 594out:
508 asd_invalidate_edb(ascb, edb); 595 asd_invalidate_edb(ascb, edb);
509} 596}
510 597
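
The aic94xx hunk above stops merely logging REQ_TASK_ABORT and REQ_DEVICE_RESET escalations and instead defers the real work (sas_task_abort(), sas_phy_reset()) to the Scsi_Host workqueue, since escb_tasklet_complete() runs in atomic context and those operations need process context. A minimal sketch of that defer-to-workqueue pattern with the 2.6.20 work_struct API; the request structure and function names here are illustrative, not taken from the driver:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct reset_request {
        struct work_struct work;        /* embedded so the handler can recover us */
        int phy_id;                     /* example payload */
};

static void do_reset_later(struct work_struct *work)
{
        /* 2.6.20+ handlers receive the work_struct; recover the container. */
        struct reset_request *req = container_of(work, struct reset_request, work);

        /* ...sleeping operations such as a hard phy reset would go here... */
        kfree(req);
}

/* Called from atomic (tasklet/IRQ) context: package the request and defer it. */
static int reset_later(struct workqueue_struct *wq, int phy_id)
{
        struct reset_request *req = kzalloc(sizeof(*req), GFP_ATOMIC);

        if (!req)
                return -ENOMEM;
        req->phy_id = phy_id;
        INIT_WORK(&req->work, do_reset_later);  /* no separate 'data' argument */
        queue_work(wq, &req->work);
        return 0;
}

The driver itself embeds the work_struct in the existing sas_phy and sas_task objects rather than allocating a request, but the INIT_WORK/queue_work/container_of flow is the same.
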
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index ef8285c326e4..668569e8856b 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -294,6 +294,7 @@ static struct Scsi_Host *hosts[FD_MAX_HOSTS + 1] = { NULL };
294static int user_fifo_count = 0; 294static int user_fifo_count = 0;
295static int user_fifo_size = 0; 295static int user_fifo_size = 0;
296 296
297#ifndef MODULE
297static int __init fd_mcs_setup(char *str) 298static int __init fd_mcs_setup(char *str)
298{ 299{
299 static int done_setup = 0; 300 static int done_setup = 0;
@@ -311,6 +312,7 @@ static int __init fd_mcs_setup(char *str)
311} 312}
312 313
313__setup("fd_mcs=", fd_mcs_setup); 314__setup("fd_mcs=", fd_mcs_setup);
315#endif /* !MODULE */
314 316
315static void print_banner(struct Scsi_Host *shpnt) 317static void print_banner(struct Scsi_Host *shpnt)
316{ 318{
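
The fd_mcs change above compiles the __setup() parameter parser only for built-in kernels: __setup() hooks the kernel command line and expands to nothing in a module build, which would otherwise leave the parser defined but unused. A generic sketch of the same guard; the option name and variable are made up:

#include <linux/kernel.h>
#include <linux/init.h>

static int mydrv_debug;

#ifndef MODULE
/* Built-in only: parses "mydrv=<n>" from the kernel command line. */
static int __init mydrv_setup(char *str)
{
        get_option(&str, &mydrv_debug);
        return 1;
}
__setup("mydrv=", mydrv_setup);
#endif /* !MODULE */
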
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 68ef1636678d..38c3a291efac 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -263,6 +263,10 @@ static void scsi_host_dev_release(struct device *dev)
263 kthread_stop(shost->ehandler); 263 kthread_stop(shost->ehandler);
264 if (shost->work_q) 264 if (shost->work_q)
265 destroy_workqueue(shost->work_q); 265 destroy_workqueue(shost->work_q);
266 if (shost->uspace_req_q) {
267 kfree(shost->uspace_req_q->queuedata);
268 scsi_free_queue(shost->uspace_req_q);
269 }
266 270
267 scsi_destroy_command_freelist(shost); 271 scsi_destroy_command_freelist(shost);
268 if (shost->bqt) 272 if (shost->bqt)
@@ -301,8 +305,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
301 if (!shost) 305 if (!shost)
302 return NULL; 306 return NULL;
303 307
304 spin_lock_init(&shost->default_lock); 308 shost->host_lock = &shost->default_lock;
305 scsi_assign_lock(shost, &shost->default_lock); 309 spin_lock_init(shost->host_lock);
306 shost->shost_state = SHOST_CREATED; 310 shost->shost_state = SHOST_CREATED;
307 INIT_LIST_HEAD(&shost->__devices); 311 INIT_LIST_HEAD(&shost->__devices);
308 INIT_LIST_HEAD(&shost->__targets); 312 INIT_LIST_HEAD(&shost->__targets);
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index 4e247b6b8700..6ac0633d5452 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
3ibmvscsic-y += ibmvscsi.o 3ibmvscsic-y += ibmvscsi.o
4ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o 4ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
5ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o 5ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
6
7obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
new file mode 100644
index 000000000000..0e74174a1b37
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -0,0 +1,958 @@
1/*
2 * IBM eServer i/pSeries Virtual SCSI Target Driver
3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
4 * Santiago Leon (santil@us.ibm.com) IBM Corp.
5 * Linda Xie (lxie@us.ibm.com) IBM Corp.
6 *
7 * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 */
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <scsi/scsi.h>
27#include <scsi/scsi_host.h>
28#include <scsi/scsi_tgt.h>
29#include <scsi/libsrp.h>
30#include <asm/hvcall.h>
31#include <asm/iommu.h>
32#include <asm/prom.h>
33#include <asm/vio.h>
34
35#include "ibmvscsi.h"
36
37#define INITIAL_SRP_LIMIT 16
38#define DEFAULT_MAX_SECTORS 512
39
40#define TGT_NAME "ibmvstgt"
41
42/*
43 * Hypervisor calls.
44 */
45#define h_copy_rdma(l, sa, sb, da, db) \
46 plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
47#define h_send_crq(ua, l, h) \
48 plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
49#define h_reg_crq(ua, tok, sz)\
50 plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
51#define h_free_crq(ua) \
52 plpar_hcall_norets(H_FREE_CRQ, ua);
53
54/* tmp - will replace with SCSI logging stuff */
55#define eprintk(fmt, args...) \
56do { \
57 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
58} while (0)
59/* #define dprintk eprintk */
60#define dprintk(fmt, args...)
61
62struct vio_port {
63 struct vio_dev *dma_dev;
64
65 struct crq_queue crq_queue;
66 struct work_struct crq_work;
67
68 unsigned long liobn;
69 unsigned long riobn;
70};
71
72static struct workqueue_struct *vtgtd;
73
74/*
75 * These are fixed for the system and come from the Open Firmware device tree.
76 * We just store them here to save getting them every time.
77 */
78static char system_id[64] = "";
79static char partition_name[97] = "UNKNOWN";
80static unsigned int partition_number = -1;
81
82static struct vio_port *target_to_port(struct srp_target *target)
83{
84 return (struct vio_port *) target->ldata;
85}
86
87static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
88{
89 return (union viosrp_iu *) (iue->sbuf->buf);
90}
91
92static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
93{
94 struct srp_target *target = iue->target;
95 struct vio_port *vport = target_to_port(target);
96 long rc, rc1;
97 union {
98 struct viosrp_crq cooked;
99 uint64_t raw[2];
100 } crq;
101
102 /* First copy the SRP */
103 rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
104 vport->riobn, iue->remote_token);
105
106 if (rc)
107 eprintk("Error %ld transferring data\n", rc);
108
109 crq.cooked.valid = 0x80;
110 crq.cooked.format = format;
111 crq.cooked.reserved = 0x00;
112 crq.cooked.timeout = 0x00;
113 crq.cooked.IU_length = length;
114 crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;
115
116 if (rc == 0)
117 crq.cooked.status = 0x99; /* Just needs to be non-zero */
118 else
119 crq.cooked.status = 0x00;
120
121 rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
122
123 if (rc1) {
124 eprintk("%ld sending response\n", rc1);
125 return rc1;
126 }
127
128 return rc;
129}
130
131#define SRP_RSP_SENSE_DATA_LEN 18
132
133static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
134 unsigned char status, unsigned char asc)
135{
136 union viosrp_iu *iu = vio_iu(iue);
137 uint64_t tag = iu->srp.rsp.tag;
138
139 /* If the linked bit is on and status is good */
140 if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
141 status = 0x10;
142
143 memset(iu, 0, sizeof(struct srp_rsp));
144 iu->srp.rsp.opcode = SRP_RSP;
145 iu->srp.rsp.req_lim_delta = 1;
146 iu->srp.rsp.tag = tag;
147
148 if (test_bit(V_DIOVER, &iue->flags))
149 iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
150
151 iu->srp.rsp.data_in_res_cnt = 0;
152 iu->srp.rsp.data_out_res_cnt = 0;
153
154 iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;
155
156 iu->srp.rsp.resp_data_len = 0;
157 iu->srp.rsp.status = status;
158 if (status) {
159 uint8_t *sense = iu->srp.rsp.data;
160
161 if (sc) {
162 iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
163 iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
164 memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
165 } else {
166 iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
167 iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
168 iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;
169
170 /* Valid bit and 'current errors' */
171 sense[0] = (0x1 << 7 | 0x70);
172 /* Sense key */
173 sense[2] = status;
174 /* Additional sense length */
175 sense[7] = 0xa; /* 10 bytes */
176 /* Additional sense code */
177 sense[12] = asc;
178 }
179 }
180
181 send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
182 VIOSRP_SRP_FORMAT);
183
184 return 0;
185}
186
187static void handle_cmd_queue(struct srp_target *target)
188{
189 struct Scsi_Host *shost = target->shost;
190 struct iu_entry *iue;
191 struct srp_cmd *cmd;
192 unsigned long flags;
193 int err;
194
195retry:
196 spin_lock_irqsave(&target->lock, flags);
197
198 list_for_each_entry(iue, &target->cmd_queue, ilist) {
199 if (!test_and_set_bit(V_FLYING, &iue->flags)) {
200 spin_unlock_irqrestore(&target->lock, flags);
201 cmd = iue->sbuf->buf;
202 err = srp_cmd_queue(shost, cmd, iue, 0);
203 if (err) {
204 eprintk("cannot queue cmd %p %d\n", cmd, err);
205 srp_iu_put(iue);
206 }
207 goto retry;
208 }
209 }
210
211 spin_unlock_irqrestore(&target->lock, flags);
212}
213
214static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
215 struct srp_direct_buf *md, int nmd,
216 enum dma_data_direction dir, unsigned int rest)
217{
218 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
219 struct srp_target *target = iue->target;
220 struct vio_port *vport = target_to_port(target);
221 dma_addr_t token;
222 long err;
223 unsigned int done = 0;
224 int i, sidx, soff;
225
226 sidx = soff = 0;
227 token = sg_dma_address(sg + sidx);
228
229 for (i = 0; i < nmd && rest; i++) {
230 unsigned int mdone, mlen;
231
232 mlen = min(rest, md[i].len);
233 for (mdone = 0; mlen;) {
234 int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
235
236 if (dir == DMA_TO_DEVICE)
237 err = h_copy_rdma(slen,
238 vport->riobn,
239 md[i].va + mdone,
240 vport->liobn,
241 token + soff);
242 else
243 err = h_copy_rdma(slen,
244 vport->liobn,
245 token + soff,
246 vport->riobn,
247 md[i].va + mdone);
248
249 if (err != H_SUCCESS) {
250 eprintk("rdma error %d %d\n", dir, slen);
251 goto out;
252 }
253
254 mlen -= slen;
255 mdone += slen;
256 soff += slen;
257 done += slen;
258
259 if (soff == sg_dma_len(sg + sidx)) {
260 sidx++;
261 soff = 0;
262 token = sg_dma_address(sg + sidx);
263
264 if (sidx > nsg) {
265 eprintk("out of sg %p %d %d\n",
266 iue, sidx, nsg);
267 goto out;
268 }
269 }
270 };
271
272 rest -= mlen;
273 }
274out:
275
276 return 0;
277}
278
279static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
280 void (*done)(struct scsi_cmnd *))
281{
282 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
283 int err;
284
285 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
286
287 done(sc);
288
289 return err;
290}
291
292static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
293 void (*done)(struct scsi_cmnd *))
294{
295 unsigned long flags;
296 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
297 struct srp_target *target = iue->target;
298
299 dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
300
301 spin_lock_irqsave(&target->lock, flags);
302 list_del(&iue->ilist);
303 spin_unlock_irqrestore(&target->lock, flags);
304
305 if (sc->result != SAM_STAT_GOOD) {
306 eprintk("operation failed %p %d %x\n",
307 iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
308 send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
309 } else
310 send_rsp(iue, sc, NO_SENSE, 0x00);
311
312 done(sc);
313 srp_iu_put(iue);
314 return 0;
315}
316
317int send_adapter_info(struct iu_entry *iue,
318 dma_addr_t remote_buffer, uint16_t length)
319{
320 struct srp_target *target = iue->target;
321 struct vio_port *vport = target_to_port(target);
322 struct Scsi_Host *shost = target->shost;
323 dma_addr_t data_token;
324 struct mad_adapter_info_data *info;
325 int err;
326
327 info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
328 GFP_KERNEL);
329 if (!info) {
330 eprintk("bad dma_alloc_coherent %p\n", target);
331 return 1;
332 }
333
334 /* Get remote info */
335 err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
336 vport->liobn, data_token);
337 if (err == H_SUCCESS) {
338 dprintk("Client connect: %s (%d)\n",
339 info->partition_name, info->partition_number);
340 }
341
342 memset(info, 0, sizeof(*info));
343
344 strcpy(info->srp_version, "16.a");
345 strncpy(info->partition_name, partition_name,
346 sizeof(info->partition_name));
347 info->partition_number = partition_number;
348 info->mad_version = 1;
349 info->os_type = 2;
350 info->port_max_txu[0] = shost->hostt->max_sectors << 9;
351
352 /* Send our info to remote */
353 err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
354 vport->riobn, remote_buffer);
355
356 dma_free_coherent(target->dev, sizeof(*info), info, data_token);
357
358 if (err != H_SUCCESS) {
359 eprintk("Error sending adapter info %d\n", err);
360 return 1;
361 }
362
363 return 0;
364}
365
366static void process_login(struct iu_entry *iue)
367{
368 union viosrp_iu *iu = vio_iu(iue);
369 struct srp_login_rsp *rsp = &iu->srp.login_rsp;
370 uint64_t tag = iu->srp.rsp.tag;
371
372 /* TODO handle case that requested size is wrong and
373 * buffer format is wrong
374 */
375 memset(iu, 0, sizeof(struct srp_login_rsp));
376 rsp->opcode = SRP_LOGIN_RSP;
377 rsp->req_lim_delta = INITIAL_SRP_LIMIT;
378 rsp->tag = tag;
379 rsp->max_it_iu_len = sizeof(union srp_iu);
380 rsp->max_ti_iu_len = sizeof(union srp_iu);
381 /* direct and indirect */
382 rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
383
384 send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
385}
386
387static inline void queue_cmd(struct iu_entry *iue)
388{
389 struct srp_target *target = iue->target;
390 unsigned long flags;
391
392 spin_lock_irqsave(&target->lock, flags);
393 list_add_tail(&iue->ilist, &target->cmd_queue);
394 spin_unlock_irqrestore(&target->lock, flags);
395}
396
397static int process_tsk_mgmt(struct iu_entry *iue)
398{
399 union viosrp_iu *iu = vio_iu(iue);
400 int fn;
401
402 dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
403
404 switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
405 case SRP_TSK_ABORT_TASK:
406 fn = ABORT_TASK;
407 break;
408 case SRP_TSK_ABORT_TASK_SET:
409 fn = ABORT_TASK_SET;
410 break;
411 case SRP_TSK_CLEAR_TASK_SET:
412 fn = CLEAR_TASK_SET;
413 break;
414 case SRP_TSK_LUN_RESET:
415 fn = LOGICAL_UNIT_RESET;
416 break;
417 case SRP_TSK_CLEAR_ACA:
418 fn = CLEAR_ACA;
419 break;
420 default:
421 fn = 0;
422 }
423 if (fn)
424 scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
425 iu->srp.tsk_mgmt.task_tag,
426 (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
427 iue);
428 else
429 send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
430
431 return !fn;
432}
433
434static int process_mad_iu(struct iu_entry *iue)
435{
436 union viosrp_iu *iu = vio_iu(iue);
437 struct viosrp_adapter_info *info;
438 struct viosrp_host_config *conf;
439
440 switch (iu->mad.empty_iu.common.type) {
441 case VIOSRP_EMPTY_IU_TYPE:
442 eprintk("%s\n", "Unsupported EMPTY MAD IU");
443 break;
444 case VIOSRP_ERROR_LOG_TYPE:
445 eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
446 iu->mad.error_log.common.status = 1;
447 send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
448 break;
449 case VIOSRP_ADAPTER_INFO_TYPE:
450 info = &iu->mad.adapter_info;
451 info->common.status = send_adapter_info(iue, info->buffer,
452 info->common.length);
453 send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
454 break;
455 case VIOSRP_HOST_CONFIG_TYPE:
456 conf = &iu->mad.host_config;
457 conf->common.status = 1;
458 send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
459 break;
460 default:
461 eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
462 }
463
464 return 1;
465}
466
467static int process_srp_iu(struct iu_entry *iue)
468{
469 union viosrp_iu *iu = vio_iu(iue);
470 int done = 1;
471 u8 opcode = iu->srp.rsp.opcode;
472
473 switch (opcode) {
474 case SRP_LOGIN_REQ:
475 process_login(iue);
476 break;
477 case SRP_TSK_MGMT:
478 done = process_tsk_mgmt(iue);
479 break;
480 case SRP_CMD:
481 queue_cmd(iue);
482 done = 0;
483 break;
484 case SRP_LOGIN_RSP:
485 case SRP_I_LOGOUT:
486 case SRP_T_LOGOUT:
487 case SRP_RSP:
488 case SRP_CRED_REQ:
489 case SRP_CRED_RSP:
490 case SRP_AER_REQ:
491 case SRP_AER_RSP:
492 eprintk("Unsupported type %u\n", opcode);
493 break;
494 default:
495 eprintk("Unknown type %u\n", opcode);
496 }
497
498 return done;
499}
500
501static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
502{
503 struct vio_port *vport = target_to_port(target);
504 struct iu_entry *iue;
505 long err, done;
506
507 iue = srp_iu_get(target);
508 if (!iue) {
509 eprintk("Error getting IU from pool, %p\n", target);
510 return;
511 }
512
513 iue->remote_token = crq->IU_data_ptr;
514
515 err = h_copy_rdma(crq->IU_length, vport->riobn,
516 iue->remote_token, vport->liobn, iue->sbuf->dma);
517
518 if (err != H_SUCCESS) {
519 eprintk("%ld transferring data error %p\n", err, iue);
520 done = 1;
521 goto out;
522 }
523
524 if (crq->format == VIOSRP_MAD_FORMAT)
525 done = process_mad_iu(iue);
526 else
527 done = process_srp_iu(iue);
528out:
529 if (done)
530 srp_iu_put(iue);
531}
532
533static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
534{
535 struct srp_target *target = (struct srp_target *) data;
536 struct vio_port *vport = target_to_port(target);
537
538 vio_disable_interrupts(vport->dma_dev);
539 queue_work(vtgtd, &vport->crq_work);
540
541 return IRQ_HANDLED;
542}
543
544static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
545{
546 int err;
547 struct vio_port *vport = target_to_port(target);
548
549 queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
550 if (!queue->msgs)
551 goto malloc_failed;
552 queue->size = PAGE_SIZE / sizeof(*queue->msgs);
553
554 queue->msg_token = dma_map_single(target->dev, queue->msgs,
555 queue->size * sizeof(*queue->msgs),
556 DMA_BIDIRECTIONAL);
557
558 if (dma_mapping_error(queue->msg_token))
559 goto map_failed;
560
561 err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
562 PAGE_SIZE);
563
564 /* If the adapter was left active for some reason (like kexec)
565 * try freeing and re-registering
566 */
567 if (err == H_RESOURCE) {
568 do {
569 err = h_free_crq(vport->dma_dev->unit_address);
570 } while (err == H_BUSY || H_IS_LONG_BUSY(err));
571
572 err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
573 PAGE_SIZE);
574 }
575
576 if (err != H_SUCCESS && err != 2) {
577 eprintk("Error 0x%x opening virtual adapter\n", err);
578 goto reg_crq_failed;
579 }
580
581 err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
582 SA_INTERRUPT, "ibmvstgt", target);
583 if (err)
584 goto req_irq_failed;
585
586 vio_enable_interrupts(vport->dma_dev);
587
588 h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);
589
590 queue->cur = 0;
591 spin_lock_init(&queue->lock);
592
593 return 0;
594
595req_irq_failed:
596 do {
597 err = h_free_crq(vport->dma_dev->unit_address);
598 } while (err == H_BUSY || H_IS_LONG_BUSY(err));
599
600reg_crq_failed:
601 dma_unmap_single(target->dev, queue->msg_token,
602 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
603map_failed:
604 free_page((unsigned long) queue->msgs);
605
606malloc_failed:
607 return -ENOMEM;
608}
609
610static void crq_queue_destroy(struct srp_target *target)
611{
612 struct vio_port *vport = target_to_port(target);
613 struct crq_queue *queue = &vport->crq_queue;
614 int err;
615
616 free_irq(vport->dma_dev->irq, target);
617 do {
618 err = h_free_crq(vport->dma_dev->unit_address);
619 } while (err == H_BUSY || H_IS_LONG_BUSY(err));
620
621 dma_unmap_single(target->dev, queue->msg_token,
622 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
623
624 free_page((unsigned long) queue->msgs);
625}
626
627static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
628{
629 struct vio_port *vport = target_to_port(target);
630 dprintk("%x %x\n", crq->valid, crq->format);
631
632 switch (crq->valid) {
633 case 0xC0:
634 /* initialization */
635 switch (crq->format) {
636 case 0x01:
637 h_send_crq(vport->dma_dev->unit_address,
638 0xC002000000000000, 0);
639 break;
640 case 0x02:
641 break;
642 default:
643 eprintk("Unknown format %u\n", crq->format);
644 }
645 break;
646 case 0xFF:
647 /* transport event */
648 break;
649 case 0x80:
650 /* real payload */
651 switch (crq->format) {
652 case VIOSRP_SRP_FORMAT:
653 case VIOSRP_MAD_FORMAT:
654 process_iu(crq, target);
655 break;
656 case VIOSRP_OS400_FORMAT:
657 case VIOSRP_AIX_FORMAT:
658 case VIOSRP_LINUX_FORMAT:
659 case VIOSRP_INLINE_FORMAT:
660 eprintk("Unsupported format %u\n", crq->format);
661 break;
662 default:
663 eprintk("Unknown format %u\n", crq->format);
664 }
665 break;
666 default:
667 eprintk("unknown message type 0x%02x!?\n", crq->valid);
668 }
669}
670
671static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
672{
673 struct viosrp_crq *crq;
674 unsigned long flags;
675
676 spin_lock_irqsave(&queue->lock, flags);
677 crq = &queue->msgs[queue->cur];
678 if (crq->valid & 0x80) {
679 if (++queue->cur == queue->size)
680 queue->cur = 0;
681 } else
682 crq = NULL;
683 spin_unlock_irqrestore(&queue->lock, flags);
684
685 return crq;
686}
687
688static void handle_crq(void *data)
689{
690 struct srp_target *target = (struct srp_target *) data;
691 struct vio_port *vport = target_to_port(target);
692 struct viosrp_crq *crq;
693 int done = 0;
694
695 while (!done) {
696 while ((crq = next_crq(&vport->crq_queue)) != NULL) {
697 process_crq(crq, target);
698 crq->valid = 0x00;
699 }
700
701 vio_enable_interrupts(vport->dma_dev);
702
703 crq = next_crq(&vport->crq_queue);
704 if (crq) {
705 vio_disable_interrupts(vport->dma_dev);
706 process_crq(crq, target);
707 crq->valid = 0x00;
708 } else
709 done = 1;
710 }
711
712 handle_cmd_queue(target);
713}
714
715
716static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
717{
718 unsigned long flags;
719 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
720 struct srp_target *target = iue->target;
721
722 dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
723
724 spin_lock_irqsave(&target->lock, flags);
725 list_del(&iue->ilist);
726 spin_unlock_irqrestore(&target->lock, flags);
727
728 srp_iu_put(iue);
729
730 return 0;
731}
732
733static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
734{
735 struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
736 union viosrp_iu *iu = vio_iu(iue);
737 unsigned char status, asc;
738
739 eprintk("%p %d\n", iue, result);
740 status = NO_SENSE;
741 asc = 0;
742
743 switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
744 case SRP_TSK_ABORT_TASK:
745 asc = 0x14;
746 if (result)
747 status = ABORTED_COMMAND;
748 break;
749 default:
750 break;
751 }
752
753 send_rsp(iue, NULL, status, asc);
754 srp_iu_put(iue);
755
756 return 0;
757}
758
759static ssize_t system_id_show(struct class_device *cdev, char *buf)
760{
761 return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
762}
763
764static ssize_t partition_number_show(struct class_device *cdev, char *buf)
765{
766 return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
767}
768
769static ssize_t unit_address_show(struct class_device *cdev, char *buf)
770{
771 struct Scsi_Host *shost = class_to_shost(cdev);
772 struct srp_target *target = host_to_srp_target(shost);
773 struct vio_port *vport = target_to_port(target);
774 return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
775}
776
777static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
778static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
779static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
780
781static struct class_device_attribute *ibmvstgt_attrs[] = {
782 &class_device_attr_system_id,
783 &class_device_attr_partition_number,
784 &class_device_attr_unit_address,
785 NULL,
786};
787
788static struct scsi_host_template ibmvstgt_sht = {
789 .name = TGT_NAME,
790 .module = THIS_MODULE,
791 .can_queue = INITIAL_SRP_LIMIT,
792 .sg_tablesize = SG_ALL,
793 .use_clustering = DISABLE_CLUSTERING,
794 .max_sectors = DEFAULT_MAX_SECTORS,
795 .transfer_response = ibmvstgt_cmd_done,
796 .transfer_data = ibmvstgt_transfer_data,
797 .eh_abort_handler = ibmvstgt_eh_abort_handler,
798 .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
799 .shost_attrs = ibmvstgt_attrs,
800 .proc_name = TGT_NAME,
801};
802
803static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
804{
805 struct Scsi_Host *shost;
806 struct srp_target *target;
807 struct vio_port *vport;
808 unsigned int *dma, dma_size;
809 int err = -ENOMEM;
810
811 vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
812 if (!vport)
813 return err;
814 shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
815 if (!shost)
816 goto free_vport;
817 err = scsi_tgt_alloc_queue(shost);
818 if (err)
819 goto put_host;
820
821 target = host_to_srp_target(shost);
822 target->shost = shost;
823 vport->dma_dev = dev;
824 target->ldata = vport;
825 err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
826 SRP_MAX_IU_LEN);
827 if (err)
828 goto put_host;
829
830 dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
831 &dma_size);
832 if (!dma || dma_size != 40) {
833 eprintk("Couldn't get window property %d\n", dma_size);
834 err = -EIO;
835 goto free_srp_target;
836 }
837 vport->liobn = dma[0];
838 vport->riobn = dma[5];
839
840 INIT_WORK(&vport->crq_work, handle_crq, target);
841
842 err = crq_queue_create(&vport->crq_queue, target);
843 if (err)
844 goto free_srp_target;
845
846 err = scsi_add_host(shost, target->dev);
847 if (err)
848 goto destroy_queue;
849 return 0;
850
851destroy_queue:
852 crq_queue_destroy(target);
853free_srp_target:
854 srp_target_free(target);
855put_host:
856 scsi_host_put(shost);
857free_vport:
858 kfree(vport);
859 return err;
860}
861
862static int ibmvstgt_remove(struct vio_dev *dev)
863{
864 struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
865 struct Scsi_Host *shost = target->shost;
866 struct vio_port *vport = target->ldata;
867
868 crq_queue_destroy(target);
869 scsi_remove_host(shost);
870 scsi_tgt_free_queue(shost);
871 srp_target_free(target);
872 kfree(vport);
873 scsi_host_put(shost);
874 return 0;
875}
876
877static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
878 {"v-scsi-host", "IBM,v-scsi-host"},
879 {"",""}
880};
881
882MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
883
884static struct vio_driver ibmvstgt_driver = {
885 .id_table = ibmvstgt_device_table,
886 .probe = ibmvstgt_probe,
887 .remove = ibmvstgt_remove,
888 .driver = {
889 .name = "ibmvscsis",
890 .owner = THIS_MODULE,
891 }
892};
893
894static int get_system_info(void)
895{
896 struct device_node *rootdn;
897 const char *id, *model, *name;
898 unsigned int *num;
899
900 rootdn = find_path_device("/");
901 if (!rootdn)
902 return -ENOENT;
903
904 model = get_property(rootdn, "model", NULL);
905 id = get_property(rootdn, "system-id", NULL);
906 if (model && id)
907 snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
908
909 name = get_property(rootdn, "ibm,partition-name", NULL);
910 if (name)
911 strncpy(partition_name, name, sizeof(partition_name));
912
913 num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
914 if (num)
915 partition_number = *num;
916
917 return 0;
918}
919
920static int ibmvstgt_init(void)
921{
922 int err = -ENOMEM;
923
924 printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
925
926 vtgtd = create_workqueue("ibmvtgtd");
927 if (!vtgtd)
928 return err;
929
930 err = get_system_info();
931 if (err)
932 goto destroy_wq;
933
934 err = vio_register_driver(&ibmvstgt_driver);
935 if (err)
936 goto destroy_wq;
937
938 return 0;
939
940destroy_wq:
941 destroy_workqueue(vtgtd);
942 return err;
943}
944
945static void ibmvstgt_exit(void)
946{
947 printk("Unregister IBM virtual SCSI driver\n");
948
949 destroy_workqueue(vtgtd);
950 vio_unregister_driver(&ibmvstgt_driver);
951}
952
953MODULE_DESCRIPTION("IBM Virtual SCSI Target");
954MODULE_AUTHOR("Santiago Leon");
955MODULE_LICENSE("GPL");
956
957module_init(ibmvstgt_init);
958module_exit(ibmvstgt_exit);
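
ibmvstgt's CRQ handling above follows a common lost-event avoidance pattern: the hard IRQ handler disables the interrupt source and punts to a workqueue; the work handler drains the queue, re-enables interrupts, then polls once more so a request that slipped in between the final drain and the re-enable is not stranded until the next interrupt. A stripped-down sketch of that drain/re-enable/re-check loop; the types and helpers are placeholders standing in for next_crq(), process_crq() and the vio_enable_interrupts()/vio_disable_interrupts() calls:

#include <linux/types.h>

struct my_dev;                          /* stands in for the vio_port/srp_target pair */
struct my_msg;                          /* stands in for struct viosrp_crq */

struct my_msg *fetch_one(struct my_dev *dev);
void handle_msg(struct my_dev *dev, struct my_msg *msg);
void irq_enable(struct my_dev *dev);
void irq_disable(struct my_dev *dev);

static void drain_queue(struct my_dev *dev)
{
        struct my_msg *msg;
        bool done = false;

        while (!done) {
                /* Consume everything queued before we were scheduled. */
                while ((msg = fetch_one(dev)) != NULL)
                        handle_msg(dev, msg);

                irq_enable(dev);        /* new arrivals raise a fresh interrupt */

                /* Close the race: one last poll for a message that arrived
                 * between the final fetch above and the re-enable. */
                msg = fetch_one(dev);
                if (msg) {
                        irq_disable(dev);
                        handle_msg(dev, msg);
                } else {
                        done = true;
                }
        }
}
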
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index e31f6122106f..0464c182c577 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -36,7 +36,7 @@ typedef struct {
36 int base_hi; /* Hi Base address for ECP-ISA chipset */ 36 int base_hi; /* Hi Base address for ECP-ISA chipset */
37 int mode; /* Transfer mode */ 37 int mode; /* Transfer mode */
38 struct scsi_cmnd *cur_cmd; /* Current queued command */ 38 struct scsi_cmnd *cur_cmd; /* Current queued command */
39 struct work_struct imm_tq; /* Polling interrupt stuff */ 39 struct delayed_work imm_tq; /* Polling interrupt stuff */
40 unsigned long jstart; /* Jiffies at start */ 40 unsigned long jstart; /* Jiffies at start */
41 unsigned failed:1; /* Failure flag */ 41 unsigned failed:1; /* Failure flag */
42 unsigned dp:1; /* Data phase present */ 42 unsigned dp:1; /* Data phase present */
@@ -733,9 +733,9 @@ static int imm_completion(struct scsi_cmnd *cmd)
733 * the scheduler's task queue to generate a stream of call-backs and 733 * the scheduler's task queue to generate a stream of call-backs and
734 * complete the request when the drive is ready. 734 * complete the request when the drive is ready.
735 */ 735 */
736static void imm_interrupt(void *data) 736static void imm_interrupt(struct work_struct *work)
737{ 737{
738 imm_struct *dev = (imm_struct *) data; 738 imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
739 struct scsi_cmnd *cmd = dev->cur_cmd; 739 struct scsi_cmnd *cmd = dev->cur_cmd;
740 struct Scsi_Host *host = cmd->device->host; 740 struct Scsi_Host *host = cmd->device->host;
741 unsigned long flags; 741 unsigned long flags;
@@ -745,7 +745,6 @@ static void imm_interrupt(void *data)
745 return; 745 return;
746 } 746 }
747 if (imm_engine(dev, cmd)) { 747 if (imm_engine(dev, cmd)) {
748 INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev);
749 schedule_delayed_work(&dev->imm_tq, 1); 748 schedule_delayed_work(&dev->imm_tq, 1);
750 return; 749 return;
751 } 750 }
@@ -953,8 +952,7 @@ static int imm_queuecommand(struct scsi_cmnd *cmd,
953 cmd->result = DID_ERROR << 16; /* default return code */ 952 cmd->result = DID_ERROR << 16; /* default return code */
954 cmd->SCp.phase = 0; /* bus free */ 953 cmd->SCp.phase = 0; /* bus free */
955 954
956 INIT_WORK(&dev->imm_tq, imm_interrupt, dev); 955 schedule_delayed_work(&dev->imm_tq, 0);
957 schedule_work(&dev->imm_tq);
958 956
959 imm_pb_claim(dev); 957 imm_pb_claim(dev);
960 958
@@ -1225,7 +1223,7 @@ static int __imm_attach(struct parport *pb)
1225 else 1223 else
1226 ports = 8; 1224 ports = 8;
1227 1225
1228 INIT_WORK(&dev->imm_tq, imm_interrupt, dev); 1226 INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
1229 1227
1230 err = -ENOMEM; 1228 err = -ENOMEM;
1231 host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); 1229 host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
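
The imm hunk above is part of the 2.6.20 workqueue API conversion: the polling state moves from a bare work_struct re-armed by hand to a struct delayed_work, the handler now receives the work_struct and recovers its context with container_of(..., imm_tq.work), and the per-schedule INIT_WORK disappears in favour of a single INIT_DELAYED_WORK at attach time. A minimal sketch of the converted shape; the poller structure and names are invented for illustration:

#include <linux/workqueue.h>

struct poller {
        struct delayed_work poll_work;  /* replaces the old work_struct + data pointer */
        int busy;                       /* example state consulted by the handler */
};

static void poll_fn(struct work_struct *work)
{
        /* Note the extra ".work": delayed_work embeds a work_struct. */
        struct poller *p = container_of(work, struct poller, poll_work.work);

        if (p->busy)
                schedule_delayed_work(&p->poll_work, 1);        /* re-arm, 1 jiffy */
}

static void poller_init(struct poller *p)
{
        /* Initialise once; no INIT_WORK before every schedule. */
        INIT_DELAYED_WORK(&p->poll_work, poll_fn);
        schedule_delayed_work(&p->poll_work, 0);                /* kick the first poll */
}
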
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index afed293dd7b9..f160357e37a6 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -170,7 +170,7 @@ static int setup_debug = 0;
170static void i91uSCBPost(BYTE * pHcb, BYTE * pScb); 170static void i91uSCBPost(BYTE * pHcb, BYTE * pScb);
171 171
172/* PCI Devices supported by this driver */ 172/* PCI Devices supported by this driver */
173static struct pci_device_id i91u_pci_devices[] __devinitdata = { 173static struct pci_device_id i91u_pci_devices[] = {
174 { PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 174 { PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
175 { PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 175 { PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
176 { PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 176 { PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2dde821025f3..ccd4dafce8e2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -79,7 +79,6 @@
79#include <scsi/scsi_tcq.h> 79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h> 80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h> 81#include <scsi/scsi_cmnd.h>
82#include <scsi/scsi_transport.h>
83#include "ipr.h" 82#include "ipr.h"
84 83
85/* 84/*
@@ -98,7 +97,7 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
98 97
99/* This table describes the differences between DMA controller chips */ 98/* This table describes the differences between DMA controller chips */
100static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { 99static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
101 { /* Gemstone, Citrine, and Obsidian */ 100 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
102 .mailbox = 0x0042C, 101 .mailbox = 0x0042C,
103 .cache_line_size = 0x20, 102 .cache_line_size = 0x20,
104 { 103 {
@@ -135,6 +134,7 @@ static const struct ipr_chip_t ipr_chip[] = {
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] }, 134 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] }, 135 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] }, 136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] }, 138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
139 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] } 139 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140}; 140};
@@ -1249,19 +1249,23 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1249 1249
1250/** 1250/**
1251 * ipr_log_hex_data - Log additional hex IOA error data. 1251 * ipr_log_hex_data - Log additional hex IOA error data.
1252 * @ioa_cfg: ioa config struct
1252 * @data: IOA error data 1253 * @data: IOA error data
1253 * @len: data length 1254 * @len: data length
1254 * 1255 *
1255 * Return value: 1256 * Return value:
1256 * none 1257 * none
1257 **/ 1258 **/
1258static void ipr_log_hex_data(u32 *data, int len) 1259static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1259{ 1260{
1260 int i; 1261 int i;
1261 1262
1262 if (len == 0) 1263 if (len == 0)
1263 return; 1264 return;
1264 1265
1266 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1267 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1268
1265 for (i = 0; i < len / 4; i += 4) { 1269 for (i = 0; i < len / 4; i += 4) {
1266 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 1270 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1267 be32_to_cpu(data[i]), 1271 be32_to_cpu(data[i]),
@@ -1290,7 +1294,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1290 ipr_err("%s\n", error->failure_reason); 1294 ipr_err("%s\n", error->failure_reason);
1291 ipr_err("Remote Adapter VPD:\n"); 1295 ipr_err("Remote Adapter VPD:\n");
1292 ipr_log_ext_vpd(&error->vpd); 1296 ipr_log_ext_vpd(&error->vpd);
1293 ipr_log_hex_data(error->data, 1297 ipr_log_hex_data(ioa_cfg, error->data,
1294 be32_to_cpu(hostrcb->hcam.length) - 1298 be32_to_cpu(hostrcb->hcam.length) -
1295 (offsetof(struct ipr_hostrcb_error, u) + 1299 (offsetof(struct ipr_hostrcb_error, u) +
1296 offsetof(struct ipr_hostrcb_type_17_error, data))); 1300 offsetof(struct ipr_hostrcb_type_17_error, data)));
@@ -1315,12 +1319,225 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1315 ipr_err("%s\n", error->failure_reason); 1319 ipr_err("%s\n", error->failure_reason);
1316 ipr_err("Remote Adapter VPD:\n"); 1320 ipr_err("Remote Adapter VPD:\n");
1317 ipr_log_vpd(&error->vpd); 1321 ipr_log_vpd(&error->vpd);
1318 ipr_log_hex_data(error->data, 1322 ipr_log_hex_data(ioa_cfg, error->data,
1319 be32_to_cpu(hostrcb->hcam.length) - 1323 be32_to_cpu(hostrcb->hcam.length) -
1320 (offsetof(struct ipr_hostrcb_error, u) + 1324 (offsetof(struct ipr_hostrcb_error, u) +
1321 offsetof(struct ipr_hostrcb_type_07_error, data))); 1325 offsetof(struct ipr_hostrcb_type_07_error, data)));
1322} 1326}
1323 1327
1328static const struct {
1329 u8 active;
1330 char *desc;
1331} path_active_desc[] = {
1332 { IPR_PATH_NO_INFO, "Path" },
1333 { IPR_PATH_ACTIVE, "Active path" },
1334 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1335};
1336
1337static const struct {
1338 u8 state;
1339 char *desc;
1340} path_state_desc[] = {
1341 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1342 { IPR_PATH_HEALTHY, "is healthy" },
1343 { IPR_PATH_DEGRADED, "is degraded" },
1344 { IPR_PATH_FAILED, "is failed" }
1345};
1346
1347/**
1348 * ipr_log_fabric_path - Log a fabric path error
1349 * @hostrcb: hostrcb struct
1350 * @fabric: fabric descriptor
1351 *
1352 * Return value:
1353 * none
1354 **/
1355static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1356 struct ipr_hostrcb_fabric_desc *fabric)
1357{
1358 int i, j;
1359 u8 path_state = fabric->path_state;
1360 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1361 u8 state = path_state & IPR_PATH_STATE_MASK;
1362
1363 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1364 if (path_active_desc[i].active != active)
1365 continue;
1366
1367 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1368 if (path_state_desc[j].state != state)
1369 continue;
1370
1371 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1372 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1373 path_active_desc[i].desc, path_state_desc[j].desc,
1374 fabric->ioa_port);
1375 } else if (fabric->cascaded_expander == 0xff) {
1376 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1377 path_active_desc[i].desc, path_state_desc[j].desc,
1378 fabric->ioa_port, fabric->phy);
1379 } else if (fabric->phy == 0xff) {
1380 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1381 path_active_desc[i].desc, path_state_desc[j].desc,
1382 fabric->ioa_port, fabric->cascaded_expander);
1383 } else {
1384 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1385 path_active_desc[i].desc, path_state_desc[j].desc,
1386 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1387 }
1388 return;
1389 }
1390 }
1391
1392 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1393 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1394}
1395
1396static const struct {
1397 u8 type;
1398 char *desc;
1399} path_type_desc[] = {
1400 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1401 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1402 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1403 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1404};
1405
1406static const struct {
1407 u8 status;
1408 char *desc;
1409} path_status_desc[] = {
1410 { IPR_PATH_CFG_NO_PROB, "Functional" },
1411 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1412 { IPR_PATH_CFG_FAILED, "Failed" },
1413 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1414 { IPR_PATH_NOT_DETECTED, "Missing" },
1415 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1416};
1417
1418static const char *link_rate[] = {
1419 "unknown",
1420 "disabled",
1421 "phy reset problem",
1422 "spinup hold",
1423 "port selector",
1424 "unknown",
1425 "unknown",
1426 "unknown",
1427 "1.5Gbps",
1428 "3.0Gbps",
1429 "unknown",
1430 "unknown",
1431 "unknown",
1432 "unknown",
1433 "unknown",
1434 "unknown"
1435};
1436
1437/**
1438 * ipr_log_path_elem - Log a fabric path element.
1439 * @hostrcb: hostrcb struct
1440 * @cfg: fabric path element struct
1441 *
1442 * Return value:
1443 * none
1444 **/
1445static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1446 struct ipr_hostrcb_config_element *cfg)
1447{
1448 int i, j;
1449 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1450 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1451
1452 if (type == IPR_PATH_CFG_NOT_EXIST)
1453 return;
1454
1455 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1456 if (path_type_desc[i].type != type)
1457 continue;
1458
1459 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1460 if (path_status_desc[j].status != status)
1461 continue;
1462
1463 if (type == IPR_PATH_CFG_IOA_PORT) {
1464 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1465 path_status_desc[j].desc, path_type_desc[i].desc,
1466 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1467 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1468 } else {
1469 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1470 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1471 path_status_desc[j].desc, path_type_desc[i].desc,
1472 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1473 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1474 } else if (cfg->cascaded_expander == 0xff) {
1475 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1476 "WWN=%08X%08X\n", path_status_desc[j].desc,
1477 path_type_desc[i].desc, cfg->phy,
1478 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1479 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1480 } else if (cfg->phy == 0xff) {
1481 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1482 "WWN=%08X%08X\n", path_status_desc[j].desc,
1483 path_type_desc[i].desc, cfg->cascaded_expander,
1484 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1485 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1486 } else {
1487 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1488 "WWN=%08X%08X\n", path_status_desc[j].desc,
1489 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1490 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1491 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1492 }
1493 }
1494 return;
1495 }
1496 }
1497
1498 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1499 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1500 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1501 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1502}
1503
1504/**
1505 * ipr_log_fabric_error - Log a fabric error.
1506 * @ioa_cfg: ioa config struct
1507 * @hostrcb: hostrcb struct
1508 *
1509 * Return value:
1510 * none
1511 **/
1512static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1513 struct ipr_hostrcb *hostrcb)
1514{
1515 struct ipr_hostrcb_type_20_error *error;
1516 struct ipr_hostrcb_fabric_desc *fabric;
1517 struct ipr_hostrcb_config_element *cfg;
1518 int i, add_len;
1519
1520 error = &hostrcb->hcam.u.error.u.type_20_error;
1521 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1522 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1523
1524 add_len = be32_to_cpu(hostrcb->hcam.length) -
1525 (offsetof(struct ipr_hostrcb_error, u) +
1526 offsetof(struct ipr_hostrcb_type_20_error, desc));
1527
1528 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1529 ipr_log_fabric_path(hostrcb, fabric);
1530 for_each_fabric_cfg(fabric, cfg)
1531 ipr_log_path_elem(hostrcb, cfg);
1532
1533 add_len -= be16_to_cpu(fabric->length);
1534 fabric = (struct ipr_hostrcb_fabric_desc *)
1535 ((unsigned long)fabric + be16_to_cpu(fabric->length));
1536 }
1537
1538 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1539}
1540
1324/** 1541/**
1325 * ipr_log_generic_error - Log an adapter error. 1542 * ipr_log_generic_error - Log an adapter error.
1326 * @ioa_cfg: ioa config struct 1543 * @ioa_cfg: ioa config struct
@@ -1332,7 +1549,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1332static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, 1549static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1333 struct ipr_hostrcb *hostrcb) 1550 struct ipr_hostrcb *hostrcb)
1334{ 1551{
1335 ipr_log_hex_data(hostrcb->hcam.u.raw.data, 1552 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
1336 be32_to_cpu(hostrcb->hcam.length)); 1553 be32_to_cpu(hostrcb->hcam.length));
1337} 1554}
1338 1555
@@ -1394,13 +1611,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1394 if (!ipr_error_table[error_index].log_hcam) 1611 if (!ipr_error_table[error_index].log_hcam)
1395 return; 1612 return;
1396 1613
1397 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) { 1614 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1398 ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1399 "%s\n", ipr_error_table[error_index].error);
1400 } else {
1401 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1402 ipr_error_table[error_index].error);
1403 }
1404 1615
1405 /* Set indication we have logged an error */ 1616 /* Set indication we have logged an error */
1406 ioa_cfg->errors_logged++; 1617 ioa_cfg->errors_logged++;
@@ -1437,6 +1648,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1437 case IPR_HOST_RCB_OVERLAY_ID_17: 1648 case IPR_HOST_RCB_OVERLAY_ID_17:
1438 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); 1649 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1439 break; 1650 break;
1651 case IPR_HOST_RCB_OVERLAY_ID_20:
1652 ipr_log_fabric_error(ioa_cfg, hostrcb);
1653 break;
1440 case IPR_HOST_RCB_OVERLAY_ID_1: 1654 case IPR_HOST_RCB_OVERLAY_ID_1:
1441 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 1655 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1442 default: 1656 default:
@@ -2093,7 +2307,7 @@ static void ipr_release_dump(struct kref *kref)
2093 2307
2094/** 2308/**
2095 * ipr_worker_thread - Worker thread 2309 * ipr_worker_thread - Worker thread
2096 * @data: ioa config struct 2310 * @work: ioa config struct
2097 * 2311 *
2098 * Called at task level from a work thread. This function takes care 2312 * Called at task level from a work thread. This function takes care
2099 * of adding and removing device from the mid-layer as configuration 2313 * of adding and removing device from the mid-layer as configuration
@@ -2102,13 +2316,14 @@ static void ipr_release_dump(struct kref *kref)
2102 * Return value: 2316 * Return value:
2103 * nothing 2317 * nothing
2104 **/ 2318 **/
2105static void ipr_worker_thread(void *data) 2319static void ipr_worker_thread(struct work_struct *work)
2106{ 2320{
2107 unsigned long lock_flags; 2321 unsigned long lock_flags;
2108 struct ipr_resource_entry *res; 2322 struct ipr_resource_entry *res;
2109 struct scsi_device *sdev; 2323 struct scsi_device *sdev;
2110 struct ipr_dump *dump; 2324 struct ipr_dump *dump;
2111 struct ipr_ioa_cfg *ioa_cfg = data; 2325 struct ipr_ioa_cfg *ioa_cfg =
2326 container_of(work, struct ipr_ioa_cfg, work_q);
2112 u8 bus, target, lun; 2327 u8 bus, target, lun;
2113 int did_work; 2328 int did_work;
2114 2329
@@ -2969,7 +3184,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2969 struct ipr_dump *dump; 3184 struct ipr_dump *dump;
2970 unsigned long lock_flags = 0; 3185 unsigned long lock_flags = 0;
2971 3186
2972 ENTER;
2973 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); 3187 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2974 3188
2975 if (!dump) { 3189 if (!dump) {
@@ -2996,7 +3210,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2996 } 3210 }
2997 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2998 3212
2999 LEAVE;
3000 return 0; 3213 return 0;
3001} 3214}
3002 3215
@@ -3573,6 +3786,12 @@ static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
3573 3786
3574 ENTER; 3787 ENTER;
3575 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3788 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3789 while(ioa_cfg->in_reset_reload) {
3790 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3791 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3792 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3793 }
3794
3576 res = sata_port->res; 3795 res = sata_port->res;
3577 if (res) { 3796 if (res) {
3578 rc = ipr_device_reset(ioa_cfg, res); 3797 rc = ipr_device_reset(ioa_cfg, res);
@@ -3636,6 +3855,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3636 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 3855 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3637 if (ipr_cmd->scsi_cmd) 3856 if (ipr_cmd->scsi_cmd)
3638 ipr_cmd->done = ipr_scsi_eh_done; 3857 ipr_cmd->done = ipr_scsi_eh_done;
3858 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3859 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3860 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3861 }
3639 } 3862 }
3640 } 3863 }
3641 3864
@@ -3770,7 +3993,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3770 */ 3993 */
3771 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead) 3994 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3772 return FAILED; 3995 return FAILED;
3773 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res))) 3996 if (!res || !ipr_is_gscsi(res))
3774 return FAILED; 3997 return FAILED;
3775 3998
3776 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 3999 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@@ -4615,7 +4838,7 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4615 * Return value: 4838 * Return value:
4616 * 0 on success / other on failure 4839 * 0 on success / other on failure
4617 **/ 4840 **/
4618int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 4841static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4619{ 4842{
4620 struct ipr_resource_entry *res; 4843 struct ipr_resource_entry *res;
4621 4844
@@ -4648,40 +4871,6 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
4648 return buffer; 4871 return buffer;
4649} 4872}
4650 4873
4651/**
4652 * ipr_scsi_timed_out - Handle scsi command timeout
4653 * @scsi_cmd: scsi command struct
4654 *
4655 * Return value:
4656 * EH_NOT_HANDLED
4657 **/
4658enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
4659{
4660 struct ipr_ioa_cfg *ioa_cfg;
4661 struct ipr_cmnd *ipr_cmd;
4662 unsigned long flags;
4663
4664 ENTER;
4665 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4666 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4667
4668 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4669 if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
4670 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4671 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4672 break;
4673 }
4674 }
4675
4676 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4677 LEAVE;
4678 return EH_NOT_HANDLED;
4679}
4680
4681static struct scsi_transport_template ipr_transport_template = {
4682 .eh_timed_out = ipr_scsi_timed_out
4683};
4684
4685static struct scsi_host_template driver_template = { 4874static struct scsi_host_template driver_template = {
4686 .module = THIS_MODULE, 4875 .module = THIS_MODULE,
4687 .name = "IPR", 4876 .name = "IPR",
@@ -4776,6 +4965,12 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4776 unsigned long flags; 4965 unsigned long flags;
4777 4966
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 4967 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4968 while(ioa_cfg->in_reset_reload) {
4969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4970 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4971 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4972 }
4973
4779 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4974 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4780 if (ipr_cmd->qc == qc) { 4975 if (ipr_cmd->qc == qc) {
4781 ipr_device_reset(ioa_cfg, sata_port->res); 4976 ipr_device_reset(ioa_cfg, sata_port->res);
@@ -6832,6 +7027,7 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6832 7027
6833 ioa_cfg->hostrcb[i]->hostrcb_dma = 7028 ioa_cfg->hostrcb[i]->hostrcb_dma =
6834 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); 7029 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7030 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
6835 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); 7031 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6836 } 7032 }
6837 7033
@@ -6926,7 +7122,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6926 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); 7122 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6927 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 7123 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6928 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 7124 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6929 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); 7125 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
6930 init_waitqueue_head(&ioa_cfg->reset_wait_q); 7126 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6931 ioa_cfg->sdt_state = INACTIVE; 7127 ioa_cfg->sdt_state = INACTIVE;
6932 if (ipr_enable_cache) 7128 if (ipr_enable_cache)
@@ -7017,7 +7213,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7017 7213
7018 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 7214 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7019 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); 7215 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7020 host->transportt = &ipr_transport_template;
7021 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, 7216 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7022 sata_port_info.flags, &ipr_sata_ops); 7217 sata_port_info.flags, &ipr_sata_ops);
7023 7218
@@ -7351,12 +7546,24 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7351 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7546 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7352 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 7547 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
7353 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, 7548 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7549 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7550 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7551 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7354 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7552 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7355 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 7553 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
7356 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, 7554 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7357 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7555 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7358 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 7556 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
7359 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, 7557 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7558 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7559 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7560 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7561 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7562 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
7563 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7564 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7565 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
7566 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7360 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 7567 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7361 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 7568 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
7362 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, 7569 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
@@ -7366,6 +7573,9 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7366 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7573 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7367 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 7574 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
7368 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, 7575 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7576 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7577 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
7578 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7369 { } 7579 { }
7370}; 7580};
7371MODULE_DEVICE_TABLE(pci, ipr_pci_table); 7581MODULE_DEVICE_TABLE(pci, ipr_pci_table);
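
Two of the ipr hunks above (ipr_sata_reset and ipr_ata_post_internal) add the same guard: if an adapter reset/reload is in flight, drop the host lock, sleep on reset_wait_q until in_reset_reload clears, and retake the lock before proceeding, because wait_event() may sleep and must never be called with a spinlock held. A generic sketch of that drop-wait-retake idiom; the structure is invented and only the locking pattern is the point:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct ctrl {
        spinlock_t lock;
        wait_queue_head_t reset_wait_q;
        int in_reset_reload;            /* set while a reset/reload is in flight */
};

static void wait_for_reset(struct ctrl *c)
{
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        while (c->in_reset_reload) {
                /* Never sleep under a spinlock: release, wait, re-acquire. */
                spin_unlock_irqrestore(&c->lock, flags);
                wait_event(c->reset_wait_q, !c->in_reset_reload);
                spin_lock_irqsave(&c->lock, flags);
        }
        /* ...proceed with the lock held and no reset in progress... */
        spin_unlock_irqrestore(&c->lock, flags);
}
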
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 6d035283af08..9f62a1d4d511 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.2.0" 40#define IPR_DRIVER_VERSION "2.3.0"
41#define IPR_DRIVER_DATE "(September 25, 2006)" 41#define IPR_DRIVER_DATE "(November 8, 2006)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -54,6 +54,8 @@
54 */ 54 */
55#define IPR_NUM_BASE_CMD_BLKS 100 55#define IPR_NUM_BASE_CMD_BLKS 100
56 56
57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
58
57#define IPR_SUBS_DEV_ID_2780 0x0264 59#define IPR_SUBS_DEV_ID_2780 0x0264
58#define IPR_SUBS_DEV_ID_5702 0x0266 60#define IPR_SUBS_DEV_ID_5702 0x0266
59#define IPR_SUBS_DEV_ID_5703 0x0278 61#define IPR_SUBS_DEV_ID_5703 0x0278
@@ -66,7 +68,11 @@
66#define IPR_SUBS_DEV_ID_571F 0x02D5 68#define IPR_SUBS_DEV_ID_571F 0x02D5
67#define IPR_SUBS_DEV_ID_572A 0x02C1 69#define IPR_SUBS_DEV_ID_572A 0x02C1
68#define IPR_SUBS_DEV_ID_572B 0x02C2 70#define IPR_SUBS_DEV_ID_572B 0x02C2
71#define IPR_SUBS_DEV_ID_572F 0x02C3
69#define IPR_SUBS_DEV_ID_575B 0x030D 72#define IPR_SUBS_DEV_ID_575B 0x030D
73#define IPR_SUBS_DEV_ID_575C 0x0338
74#define IPR_SUBS_DEV_ID_57B7 0x0360
75#define IPR_SUBS_DEV_ID_57B8 0x02C2
70 76
71#define IPR_NAME "ipr" 77#define IPR_NAME "ipr"
72 78
@@ -98,6 +104,7 @@
98#define IPR_IOASC_IOA_WAS_RESET 0x10000001 104#define IPR_IOASC_IOA_WAS_RESET 0x10000001
99#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002 105#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002
100 106
107#define IPR_DEFAULT_MAX_ERROR_DUMP 984
101#define IPR_NUM_LOG_HCAMS 2 108#define IPR_NUM_LOG_HCAMS 2
102#define IPR_NUM_CFG_CHG_HCAMS 2 109#define IPR_NUM_CFG_CHG_HCAMS 2
103#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) 110#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
@@ -731,6 +738,64 @@ struct ipr_hostrcb_type_17_error {
731 u32 data[476]; 738 u32 data[476];
732}__attribute__((packed, aligned (4))); 739}__attribute__((packed, aligned (4)));
733 740
741struct ipr_hostrcb_config_element {
742 u8 type_status;
743#define IPR_PATH_CFG_TYPE_MASK 0xF0
744#define IPR_PATH_CFG_NOT_EXIST 0x00
745#define IPR_PATH_CFG_IOA_PORT 0x10
746#define IPR_PATH_CFG_EXP_PORT 0x20
747#define IPR_PATH_CFG_DEVICE_PORT 0x30
748#define IPR_PATH_CFG_DEVICE_LUN 0x40
749
750#define IPR_PATH_CFG_STATUS_MASK 0x0F
751#define IPR_PATH_CFG_NO_PROB 0x00
752#define IPR_PATH_CFG_DEGRADED 0x01
753#define IPR_PATH_CFG_FAILED 0x02
754#define IPR_PATH_CFG_SUSPECT 0x03
755#define IPR_PATH_NOT_DETECTED 0x04
756#define IPR_PATH_INCORRECT_CONN 0x05
757
758 u8 cascaded_expander;
759 u8 phy;
760 u8 link_rate;
761#define IPR_PHY_LINK_RATE_MASK 0x0F
762
763 __be32 wwid[2];
764}__attribute__((packed, aligned (4)));
765
766struct ipr_hostrcb_fabric_desc {
767 __be16 length;
768 u8 ioa_port;
769 u8 cascaded_expander;
770 u8 phy;
771 u8 path_state;
772#define IPR_PATH_ACTIVE_MASK 0xC0
773#define IPR_PATH_NO_INFO 0x00
774#define IPR_PATH_ACTIVE 0x40
775#define IPR_PATH_NOT_ACTIVE 0x80
776
777#define IPR_PATH_STATE_MASK 0x0F
778#define IPR_PATH_STATE_NO_INFO 0x00
779#define IPR_PATH_HEALTHY 0x01
780#define IPR_PATH_DEGRADED 0x02
781#define IPR_PATH_FAILED 0x03
782
783 __be16 num_entries;
784 struct ipr_hostrcb_config_element elem[1];
785}__attribute__((packed, aligned (4)));
786
787#define for_each_fabric_cfg(fabric, cfg) \
788 for (cfg = (fabric)->elem; \
789 cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
790 cfg++)
791
792struct ipr_hostrcb_type_20_error {
793 u8 failure_reason[64];
794 u8 reserved[3];
795 u8 num_entries;
796 struct ipr_hostrcb_fabric_desc desc[1];
797}__attribute__((packed, aligned (4)));
798
734struct ipr_hostrcb_error { 799struct ipr_hostrcb_error {
735 __be32 failing_dev_ioasc; 800 __be32 failing_dev_ioasc;
736 struct ipr_res_addr failing_dev_res_addr; 801 struct ipr_res_addr failing_dev_res_addr;
@@ -747,6 +812,7 @@ struct ipr_hostrcb_error {
747 struct ipr_hostrcb_type_13_error type_13_error; 812 struct ipr_hostrcb_type_13_error type_13_error;
748 struct ipr_hostrcb_type_14_error type_14_error; 813 struct ipr_hostrcb_type_14_error type_14_error;
749 struct ipr_hostrcb_type_17_error type_17_error; 814 struct ipr_hostrcb_type_17_error type_17_error;
815 struct ipr_hostrcb_type_20_error type_20_error;
750 } u; 816 } u;
751}__attribute__((packed, aligned (4))); 817}__attribute__((packed, aligned (4)));
752 818
@@ -786,6 +852,7 @@ struct ipr_hcam {
786#define IPR_HOST_RCB_OVERLAY_ID_14 0x14 852#define IPR_HOST_RCB_OVERLAY_ID_14 0x14
787#define IPR_HOST_RCB_OVERLAY_ID_16 0x16 853#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
788#define IPR_HOST_RCB_OVERLAY_ID_17 0x17 854#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
855#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
789#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF 856#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
790 857
791 u8 reserved1[3]; 858 u8 reserved1[3];
@@ -805,6 +872,7 @@ struct ipr_hostrcb {
805 struct ipr_hcam hcam; 872 struct ipr_hcam hcam;
806 dma_addr_t hostrcb_dma; 873 dma_addr_t hostrcb_dma;
807 struct list_head queue; 874 struct list_head queue;
875 struct ipr_ioa_cfg *ioa_cfg;
808}; 876};
809 877
810/* IPR smart dump table structures */ 878/* IPR smart dump table structures */
@@ -1283,6 +1351,17 @@ struct ipr_ucode_image_header {
1283 } \ 1351 } \
1284} 1352}
1285 1353
1354#define ipr_hcam_err(hostrcb, fmt, ...) \
1355{ \
1356 if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \
1357 ipr_ra_err((hostrcb)->ioa_cfg, \
1358 (hostrcb)->hcam.u.error.failing_dev_res_addr, \
1359 fmt, ##__VA_ARGS__); \
1360 } else { \
1361 dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \
1362 } \
1363}
1364
1286#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ 1365#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
1287 __FILE__, __FUNCTION__, __LINE__) 1366 __FILE__, __FUNCTION__, __LINE__)
1288 1367
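
The type 20 overlay above describes SAS fabric paths as a variable-length list of fabric descriptors, each carrying its own list of config elements, and for_each_fabric_cfg() hides the be16 length handling when walking them. A rough sketch of a logging helper built on that macro; ipr_log_fabric_path_sketch() itself is illustrative and not part of this patch:

/* Illustrative only: walk one fabric descriptor's config elements with the
 * for_each_fabric_cfg() macro and report each path element. */
static void ipr_log_fabric_path_sketch(struct ipr_hostrcb *hostrcb,
                                       struct ipr_hostrcb_fabric_desc *fabric)
{
        struct ipr_hostrcb_config_element *cfg;

        for_each_fabric_cfg(fabric, cfg) {
                u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
                u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

                ipr_hcam_err(hostrcb,
                             "path element: type 0x%02X status 0x%02X phy %d "
                             "link rate 0x%X\n", type, status, cfg->phy,
                             cfg->link_rate & IPR_PHY_LINK_RATE_MASK);
        }
}
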
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index f06a06ae6092..8b704f73055a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -5001,7 +5001,7 @@ ips_init_copperhead(ips_ha_t * ha)
5001 break; 5001 break;
5002 5002
5003 /* Delay for 1 Second */ 5003 /* Delay for 1 Second */
5004 msleep(IPS_ONE_SEC); 5004 MDELAY(IPS_ONE_SEC);
5005 } 5005 }
5006 5006
5007 if (j >= 45) 5007 if (j >= 45)
@@ -5027,7 +5027,7 @@ ips_init_copperhead(ips_ha_t * ha)
5027 break; 5027 break;
5028 5028
5029 /* Delay for 1 Second */ 5029 /* Delay for 1 Second */
5030 msleep(IPS_ONE_SEC); 5030 MDELAY(IPS_ONE_SEC);
5031 } 5031 }
5032 5032
5033 if (j >= 240) 5033 if (j >= 240)
@@ -5045,7 +5045,7 @@ ips_init_copperhead(ips_ha_t * ha)
5045 break; 5045 break;
5046 5046
5047 /* Delay for 1 Second */ 5047 /* Delay for 1 Second */
5048 msleep(IPS_ONE_SEC); 5048 MDELAY(IPS_ONE_SEC);
5049 } 5049 }
5050 5050
5051 if (i >= 240) 5051 if (i >= 240)
@@ -5095,7 +5095,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
5095 break; 5095 break;
5096 5096
5097 /* Delay for 1 Second */ 5097 /* Delay for 1 Second */
5098 msleep(IPS_ONE_SEC); 5098 MDELAY(IPS_ONE_SEC);
5099 } 5099 }
5100 5100
5101 if (j >= 45) 5101 if (j >= 45)
@@ -5121,7 +5121,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
5121 break; 5121 break;
5122 5122
5123 /* Delay for 1 Second */ 5123 /* Delay for 1 Second */
5124 msleep(IPS_ONE_SEC); 5124 MDELAY(IPS_ONE_SEC);
5125 } 5125 }
5126 5126
5127 if (j >= 240) 5127 if (j >= 240)
@@ -5139,7 +5139,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
5139 break; 5139 break;
5140 5140
5141 /* Delay for 1 Second */ 5141 /* Delay for 1 Second */
5142 msleep(IPS_ONE_SEC); 5142 MDELAY(IPS_ONE_SEC);
5143 } 5143 }
5144 5144
5145 if (i >= 240) 5145 if (i >= 240)
@@ -5191,7 +5191,7 @@ ips_init_morpheus(ips_ha_t * ha)
5191 break; 5191 break;
5192 5192
5193 /* Delay for 1 Second */ 5193 /* Delay for 1 Second */
5194 msleep(IPS_ONE_SEC); 5194 MDELAY(IPS_ONE_SEC);
5195 } 5195 }
5196 5196
5197 if (i >= 45) { 5197 if (i >= 45) {
@@ -5217,7 +5217,7 @@ ips_init_morpheus(ips_ha_t * ha)
5217 if (Post != 0x4F00) 5217 if (Post != 0x4F00)
5218 break; 5218 break;
5219 /* Delay for 1 Second */ 5219 /* Delay for 1 Second */
5220 msleep(IPS_ONE_SEC); 5220 MDELAY(IPS_ONE_SEC);
5221 } 5221 }
5222 5222
5223 if (i >= 120) { 5223 if (i >= 120) {
@@ -5247,7 +5247,7 @@ ips_init_morpheus(ips_ha_t * ha)
5247 break; 5247 break;
5248 5248
5249 /* Delay for 1 Second */ 5249 /* Delay for 1 Second */
5250 msleep(IPS_ONE_SEC); 5250 MDELAY(IPS_ONE_SEC);
5251 } 5251 }
5252 5252
5253 if (i >= 240) { 5253 if (i >= 240) {
@@ -5307,12 +5307,12 @@ ips_reset_copperhead(ips_ha_t * ha)
5307 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); 5307 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
5308 5308
5309 /* Delay for 1 Second */ 5309 /* Delay for 1 Second */
5310 msleep(IPS_ONE_SEC); 5310 MDELAY(IPS_ONE_SEC);
5311 5311
5312 outb(0, ha->io_addr + IPS_REG_SCPR); 5312 outb(0, ha->io_addr + IPS_REG_SCPR);
5313 5313
5314 /* Delay for 1 Second */ 5314 /* Delay for 1 Second */
5315 msleep(IPS_ONE_SEC); 5315 MDELAY(IPS_ONE_SEC);
5316 5316
5317 if ((*ha->func.init) (ha)) 5317 if ((*ha->func.init) (ha))
5318 break; 5318 break;
@@ -5352,12 +5352,12 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
5352 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); 5352 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
5353 5353
5354 /* Delay for 1 Second */ 5354 /* Delay for 1 Second */
5355 msleep(IPS_ONE_SEC); 5355 MDELAY(IPS_ONE_SEC);
5356 5356
5357 writeb(0, ha->mem_ptr + IPS_REG_SCPR); 5357 writeb(0, ha->mem_ptr + IPS_REG_SCPR);
5358 5358
5359 /* Delay for 1 Second */ 5359 /* Delay for 1 Second */
5360 msleep(IPS_ONE_SEC); 5360 MDELAY(IPS_ONE_SEC);
5361 5361
5362 if ((*ha->func.init) (ha)) 5362 if ((*ha->func.init) (ha))
5363 break; 5363 break;
@@ -5398,7 +5398,7 @@ ips_reset_morpheus(ips_ha_t * ha)
5398 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); 5398 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
5399 5399
5400 /* Delay for 5 Seconds */ 5400 /* Delay for 5 Seconds */
5401 msleep(5 * IPS_ONE_SEC); 5401 MDELAY(5 * IPS_ONE_SEC);
5402 5402
5403 /* Do a PCI config read to wait for adapter */ 5403 /* Do a PCI config read to wait for adapter */
5404 pci_read_config_byte(ha->pcidev, 4, &junk); 5404 pci_read_config_byte(ha->pcidev, 4, &junk);
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 34680f3dd452..b726dcc424b1 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -51,6 +51,7 @@
51 #define _IPS_H_ 51 #define _IPS_H_
52 52
53#include <linux/version.h> 53#include <linux/version.h>
54#include <linux/nmi.h>
54 #include <asm/uaccess.h> 55 #include <asm/uaccess.h>
55 #include <asm/io.h> 56 #include <asm/io.h>
56 57
@@ -116,9 +117,11 @@
116 dev_printk(level , &((pcidev)->dev) , format , ## arg) 117 dev_printk(level , &((pcidev)->dev) , format , ## arg)
117 #endif 118 #endif
118 119
119 #ifndef MDELAY 120 #define MDELAY(n) \
120 #define MDELAY mdelay 121 do { \
121 #endif 122 mdelay(n); \
123 touch_nmi_watchdog(); \
124 } while (0)
122 125
123 #ifndef min 126 #ifndef min
124 #define min(x,y) ((x) < (y) ? x : y) 127 #define min(x,y) ((x) < (y) ? x : y)
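
The ips.c hunks above replace msleep() with MDELAY() in loops that can spin for minutes during adapter init and reset; the rewritten macro busy-waits with mdelay() and then calls touch_nmi_watchdog(), so those long delays do not trigger spurious hard-lockup reports. The same idea in isolation, as a hedged sketch rather than driver code:

#include <linux/delay.h>
#include <linux/nmi.h>

/* Illustrative helper, not ips code: spin for 'secs' seconds while keeping
 * the NMI watchdog fed, which is what each MDELAY(IPS_ONE_SEC) call does. */
static void busy_wait_seconds(unsigned int secs)
{
        while (secs--) {
                mdelay(1000);           /* one second of busy-waiting */
                touch_nmi_watchdog();   /* avoid a false hard-lockup report */
        }
}
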
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5d8862189485..e11b23c641e2 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -719,9 +719,10 @@ again:
719 return rc; 719 return rc;
720} 720}
721 721
722static void iscsi_xmitworker(void *data) 722static void iscsi_xmitworker(struct work_struct *work)
723{ 723{
724 struct iscsi_conn *conn = data; 724 struct iscsi_conn *conn =
725 container_of(work, struct iscsi_conn, xmitwork);
725 int rc; 726 int rc;
726 /* 727 /*
727 * serialize Xmit worker on a per-connection basis. 728 * serialize Xmit worker on a per-connection basis.
@@ -1512,7 +1513,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1512 if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) 1513 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
1513 goto mgmtqueue_alloc_fail; 1514 goto mgmtqueue_alloc_fail;
1514 1515
1515 INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn); 1516 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
1516 1517
1517 /* allocate login_mtask used for the login/text sequences */ 1518 /* allocate login_mtask used for the login/text sequences */
1518 spin_lock_bh(&session->lock); 1519 spin_lock_bh(&session->lock);
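
This libiscsi hunk shows the conversion pattern that repeats through the rest of the patch: work handlers now take a struct work_struct pointer, recover their owning object with container_of(), and INIT_WORK() loses its data argument. A self-contained sketch of the new convention, using made-up names (struct my_conn, my_xmit):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_conn {
        int id;
        struct work_struct xmitwork;    /* work item embedded in the object */
};

/* New-style handler: no void *data, context comes from container_of(). */
static void my_xmit(struct work_struct *work)
{
        struct my_conn *conn = container_of(work, struct my_conn, xmitwork);

        printk(KERN_INFO "xmit for conn %d\n", conn->id);
}

static void my_conn_setup(struct my_conn *conn)
{
        INIT_WORK(&conn->xmitwork, my_xmit);    /* third argument is gone */
}
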
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d977bd492d8d..fb7df7b75811 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -647,10 +647,12 @@ void sas_unregister_domain_devices(struct asd_sas_port *port)
647 * Discover process only interrogates devices in order to discover the 647 * Discover process only interrogates devices in order to discover the
648 * domain. 648 * domain.
649 */ 649 */
650static void sas_discover_domain(void *data) 650static void sas_discover_domain(struct work_struct *work)
651{ 651{
652 int error = 0; 652 int error = 0;
653 struct asd_sas_port *port = data; 653 struct sas_discovery_event *ev =
654 container_of(work, struct sas_discovery_event, work);
655 struct asd_sas_port *port = ev->port;
654 656
655 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, 657 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
656 &port->disc.pending); 658 &port->disc.pending);
@@ -692,10 +694,12 @@ static void sas_discover_domain(void *data)
692 current->pid, error); 694 current->pid, error);
693} 695}
694 696
695static void sas_revalidate_domain(void *data) 697static void sas_revalidate_domain(struct work_struct *work)
696{ 698{
697 int res = 0; 699 int res = 0;
698 struct asd_sas_port *port = data; 700 struct sas_discovery_event *ev =
701 container_of(work, struct sas_discovery_event, work);
702 struct asd_sas_port *port = ev->port;
699 703
700 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, 704 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
701 &port->disc.pending); 705 &port->disc.pending);
@@ -722,7 +726,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
722 BUG_ON(ev >= DISC_NUM_EVENTS); 726 BUG_ON(ev >= DISC_NUM_EVENTS);
723 727
724 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, 728 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
725 &disc->disc_work[ev], port->ha->core.shost); 729 &disc->disc_work[ev].work, port->ha->core.shost);
726 730
727 return 0; 731 return 0;
728} 732}
@@ -737,13 +741,15 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
737{ 741{
738 int i; 742 int i;
739 743
740 static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { 744 static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
741 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, 745 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
742 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, 746 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
743 }; 747 };
744 748
745 spin_lock_init(&disc->disc_event_lock); 749 spin_lock_init(&disc->disc_event_lock);
746 disc->pending = 0; 750 disc->pending = 0;
747 for (i = 0; i < DISC_NUM_EVENTS; i++) 751 for (i = 0; i < DISC_NUM_EVENTS; i++) {
748 INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); 752 INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
753 disc->disc_work[i].port = port;
754 }
749} 755}
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 19110ed1c89c..d83392ee6823 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
31 BUG_ON(event >= HA_NUM_EVENTS); 31 BUG_ON(event >= HA_NUM_EVENTS);
32 32
33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, 33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
34 &sas_ha->ha_events[event], sas_ha->core.shost); 34 &sas_ha->ha_events[event].work, sas_ha->core.shost);
35} 35}
36 36
37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) 37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
41 BUG_ON(event >= PORT_NUM_EVENTS); 41 BUG_ON(event >= PORT_NUM_EVENTS);
42 42
43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, 43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
44 &phy->port_events[event], ha->core.shost); 44 &phy->port_events[event].work, ha->core.shost);
45} 45}
46 46
47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) 47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@@ -51,12 +51,12 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
51 BUG_ON(event >= PHY_NUM_EVENTS); 51 BUG_ON(event >= PHY_NUM_EVENTS);
52 52
53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, 53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
54 &phy->phy_events[event], ha->core.shost); 54 &phy->phy_events[event].work, ha->core.shost);
55} 55}
56 56
57int sas_init_events(struct sas_ha_struct *sas_ha) 57int sas_init_events(struct sas_ha_struct *sas_ha)
58{ 58{
59 static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { 59 static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
60 [HAE_RESET] = sas_hae_reset, 60 [HAE_RESET] = sas_hae_reset,
61 }; 61 };
62 62
@@ -64,8 +64,10 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
64 64
65 spin_lock_init(&sas_ha->event_lock); 65 spin_lock_init(&sas_ha->event_lock);
66 66
67 for (i = 0; i < HA_NUM_EVENTS; i++) 67 for (i = 0; i < HA_NUM_EVENTS; i++) {
68 INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); 68 INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
69 sas_ha->ha_events[i].ha = sas_ha;
70 }
69 71
70 sas_ha->notify_ha_event = notify_ha_event; 72 sas_ha->notify_ha_event = notify_ha_event;
71 sas_ha->notify_port_event = notify_port_event; 73 sas_ha->notify_port_event = notify_port_event;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index e34a93435497..d31e6fa466f7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -597,10 +597,15 @@ static struct domain_device *sas_ex_discover_end_dev(
597 child->iproto = phy->attached_iproto; 597 child->iproto = phy->attached_iproto;
598 memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); 598 memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
599 sas_hash_addr(child->hashed_sas_addr, child->sas_addr); 599 sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
600 phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); 600 if (!phy->port) {
601 BUG_ON(!phy->port); 601 phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
602 /* FIXME: better error handling*/ 602 if (unlikely(!phy->port))
603 BUG_ON(sas_port_add(phy->port) != 0); 603 goto out_err;
604 if (unlikely(sas_port_add(phy->port) != 0)) {
605 sas_port_free(phy->port);
606 goto out_err;
607 }
608 }
604 sas_ex_get_linkrate(parent, child, phy); 609 sas_ex_get_linkrate(parent, child, phy);
605 610
606 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) { 611 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
@@ -615,8 +620,7 @@ static struct domain_device *sas_ex_discover_end_dev(
615 SAS_DPRINTK("report phy sata to %016llx:0x%x returned " 620 SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
616 "0x%x\n", SAS_ADDR(parent->sas_addr), 621 "0x%x\n", SAS_ADDR(parent->sas_addr),
617 phy_id, res); 622 phy_id, res);
618 kfree(child); 623 goto out_free;
619 return NULL;
620 } 624 }
621 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis, 625 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
622 sizeof(struct dev_to_host_fis)); 626 sizeof(struct dev_to_host_fis));
@@ -627,14 +631,14 @@ static struct domain_device *sas_ex_discover_end_dev(
627 "%016llx:0x%x returned 0x%x\n", 631 "%016llx:0x%x returned 0x%x\n",
628 SAS_ADDR(child->sas_addr), 632 SAS_ADDR(child->sas_addr),
629 SAS_ADDR(parent->sas_addr), phy_id, res); 633 SAS_ADDR(parent->sas_addr), phy_id, res);
630 kfree(child); 634 goto out_free;
631 return NULL;
632 } 635 }
633 } else if (phy->attached_tproto & SAS_PROTO_SSP) { 636 } else if (phy->attached_tproto & SAS_PROTO_SSP) {
634 child->dev_type = SAS_END_DEV; 637 child->dev_type = SAS_END_DEV;
635 rphy = sas_end_device_alloc(phy->port); 638 rphy = sas_end_device_alloc(phy->port);
636 /* FIXME: error handling */ 639 /* FIXME: error handling */
637 BUG_ON(!rphy); 640 if (unlikely(!rphy))
641 goto out_free;
638 child->tproto = phy->attached_tproto; 642 child->tproto = phy->attached_tproto;
639 sas_init_dev(child); 643 sas_init_dev(child);
640 644
@@ -651,9 +655,7 @@ static struct domain_device *sas_ex_discover_end_dev(
651 "at %016llx:0x%x returned 0x%x\n", 655 "at %016llx:0x%x returned 0x%x\n",
652 SAS_ADDR(child->sas_addr), 656 SAS_ADDR(child->sas_addr),
653 SAS_ADDR(parent->sas_addr), phy_id, res); 657 SAS_ADDR(parent->sas_addr), phy_id, res);
654 /* FIXME: this kfrees list elements without removing them */ 658 goto out_list_del;
655 //kfree(child);
656 return NULL;
657 } 659 }
658 } else { 660 } else {
659 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", 661 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
@@ -663,6 +665,16 @@ static struct domain_device *sas_ex_discover_end_dev(
663 665
664 list_add_tail(&child->siblings, &parent_ex->children); 666 list_add_tail(&child->siblings, &parent_ex->children);
665 return child; 667 return child;
668
669 out_list_del:
670 list_del(&child->dev_list_node);
671 sas_rphy_free(rphy);
672 out_free:
673 sas_port_delete(phy->port);
674 out_err:
675 phy->port = NULL;
676 kfree(child);
677 return NULL;
666} 678}
667 679
668static struct domain_device *sas_ex_discover_expander( 680static struct domain_device *sas_ex_discover_expander(
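
In sas_ex_discover_end_dev() the BUG_ON() calls on allocation failure become staged goto labels, so each failure path unwinds only what was already set up (list entry, rphy, port, child). The idiom on its own, with generic placeholder resources rather than libsas objects:

#include <linux/slab.h>

/* Generic sketch of staged unwinding: each label releases exactly what was
 * acquired before the failing step, instead of calling BUG_ON(). */
static int setup_two_resources(void)
{
        void *a, *b;

        a = kmalloc(32, GFP_KERNEL);
        if (!a)
                goto out_err;

        b = kmalloc(64, GFP_KERNEL);
        if (!b)
                goto out_free_a;

        /* ... use a and b, then release them in reverse order ... */
        kfree(b);
        kfree(a);
        return 0;

out_free_a:
        kfree(a);
out_err:
        return -ENOMEM;
}
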
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index c836a237fb79..d65bc4e0f214 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -65,9 +65,11 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
65 65
66/* ---------- HA events ---------- */ 66/* ---------- HA events ---------- */
67 67
68void sas_hae_reset(void *data) 68void sas_hae_reset(struct work_struct *work)
69{ 69{
70 struct sas_ha_struct *ha = data; 70 struct sas_ha_event *ev =
71 container_of(work, struct sas_ha_event, work);
72 struct sas_ha_struct *ha = ev->ha;
71 73
72 sas_begin_event(HAE_RESET, &ha->event_lock, 74 sas_begin_event(HAE_RESET, &ha->event_lock,
73 &ha->pending); 75 &ha->pending);
@@ -112,6 +114,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
112 } 114 }
113 } 115 }
114 116
117 INIT_LIST_HEAD(&sas_ha->eh_done_q);
118
115 return 0; 119 return 0;
116 120
117Undo_ports: 121Undo_ports:
@@ -142,7 +146,7 @@ static int sas_get_linkerrors(struct sas_phy *phy)
142 return sas_smp_get_phy_events(phy); 146 return sas_smp_get_phy_events(phy);
143} 147}
144 148
145static int sas_phy_reset(struct sas_phy *phy, int hard_reset) 149int sas_phy_reset(struct sas_phy *phy, int hard_reset)
146{ 150{
147 int ret; 151 int ret;
148 enum phy_func reset_type; 152 enum phy_func reset_type;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index bffcee474921..137d7e496b6d 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -60,11 +60,11 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
60 60
61void sas_deform_port(struct asd_sas_phy *phy); 61void sas_deform_port(struct asd_sas_phy *phy);
62 62
63void sas_porte_bytes_dmaed(void *); 63void sas_porte_bytes_dmaed(struct work_struct *work);
64void sas_porte_broadcast_rcvd(void *); 64void sas_porte_broadcast_rcvd(struct work_struct *work);
65void sas_porte_link_reset_err(void *); 65void sas_porte_link_reset_err(struct work_struct *work);
66void sas_porte_timer_event(void *); 66void sas_porte_timer_event(struct work_struct *work);
67void sas_porte_hard_reset(void *); 67void sas_porte_hard_reset(struct work_struct *work);
68 68
69int sas_notify_lldd_dev_found(struct domain_device *); 69int sas_notify_lldd_dev_found(struct domain_device *);
70void sas_notify_lldd_dev_gone(struct domain_device *); 70void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -75,7 +75,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy);
75 75
76struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); 76struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
77 77
78void sas_hae_reset(void *); 78void sas_hae_reset(struct work_struct *work);
79 79
80static inline void sas_queue_event(int event, spinlock_t *lock, 80static inline void sas_queue_event(int event, spinlock_t *lock,
81 unsigned long *pending, 81 unsigned long *pending,
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 9340cdbae4a3..b459c4b635b1 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -30,9 +30,11 @@
30 30
31/* ---------- Phy events ---------- */ 31/* ---------- Phy events ---------- */
32 32
33static void sas_phye_loss_of_signal(void *data) 33static void sas_phye_loss_of_signal(struct work_struct *work)
34{ 34{
35 struct asd_sas_phy *phy = data; 35 struct asd_sas_event *ev =
36 container_of(work, struct asd_sas_event, work);
37 struct asd_sas_phy *phy = ev->phy;
36 38
37 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, 39 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
38 &phy->phy_events_pending); 40 &phy->phy_events_pending);
@@ -40,18 +42,22 @@ static void sas_phye_loss_of_signal(void *data)
40 sas_deform_port(phy); 42 sas_deform_port(phy);
41} 43}
42 44
43static void sas_phye_oob_done(void *data) 45static void sas_phye_oob_done(struct work_struct *work)
44{ 46{
45 struct asd_sas_phy *phy = data; 47 struct asd_sas_event *ev =
48 container_of(work, struct asd_sas_event, work);
49 struct asd_sas_phy *phy = ev->phy;
46 50
47 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, 51 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
48 &phy->phy_events_pending); 52 &phy->phy_events_pending);
49 phy->error = 0; 53 phy->error = 0;
50} 54}
51 55
52static void sas_phye_oob_error(void *data) 56static void sas_phye_oob_error(struct work_struct *work)
53{ 57{
54 struct asd_sas_phy *phy = data; 58 struct asd_sas_event *ev =
59 container_of(work, struct asd_sas_event, work);
60 struct asd_sas_phy *phy = ev->phy;
55 struct sas_ha_struct *sas_ha = phy->ha; 61 struct sas_ha_struct *sas_ha = phy->ha;
56 struct asd_sas_port *port = phy->port; 62 struct asd_sas_port *port = phy->port;
57 struct sas_internal *i = 63 struct sas_internal *i =
@@ -80,9 +86,11 @@ static void sas_phye_oob_error(void *data)
80 } 86 }
81} 87}
82 88
83static void sas_phye_spinup_hold(void *data) 89static void sas_phye_spinup_hold(struct work_struct *work)
84{ 90{
85 struct asd_sas_phy *phy = data; 91 struct asd_sas_event *ev =
92 container_of(work, struct asd_sas_event, work);
93 struct asd_sas_phy *phy = ev->phy;
86 struct sas_ha_struct *sas_ha = phy->ha; 94 struct sas_ha_struct *sas_ha = phy->ha;
87 struct sas_internal *i = 95 struct sas_internal *i =
88 to_sas_internal(sas_ha->core.shost->transportt); 96 to_sas_internal(sas_ha->core.shost->transportt);
@@ -100,14 +108,14 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
100{ 108{
101 int i; 109 int i;
102 110
103 static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { 111 static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
104 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, 112 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
105 [PHYE_OOB_DONE] = sas_phye_oob_done, 113 [PHYE_OOB_DONE] = sas_phye_oob_done,
106 [PHYE_OOB_ERROR] = sas_phye_oob_error, 114 [PHYE_OOB_ERROR] = sas_phye_oob_error,
107 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, 115 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
108 }; 116 };
109 117
110 static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { 118 static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
111 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, 119 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
112 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, 120 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
113 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, 121 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
@@ -122,13 +130,18 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
122 130
123 phy->error = 0; 131 phy->error = 0;
124 INIT_LIST_HEAD(&phy->port_phy_el); 132 INIT_LIST_HEAD(&phy->port_phy_el);
125 for (k = 0; k < PORT_NUM_EVENTS; k++) 133 for (k = 0; k < PORT_NUM_EVENTS; k++) {
126 INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], 134 INIT_WORK(&phy->port_events[k].work,
127 phy); 135 sas_port_event_fns[k]);
136 phy->port_events[k].phy = phy;
137 }
138
139 for (k = 0; k < PHY_NUM_EVENTS; k++) {
140 INIT_WORK(&phy->phy_events[k].work,
141 sas_phy_event_fns[k]);
142 phy->phy_events[k].phy = phy;
143 }
128 144
129 for (k = 0; k < PHY_NUM_EVENTS; k++)
130 INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
131 phy);
132 phy->port = NULL; 145 phy->port = NULL;
133 phy->ha = sas_ha; 146 phy->ha = sas_ha;
134 spin_lock_init(&phy->frame_rcvd_lock); 147 spin_lock_init(&phy->frame_rcvd_lock);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 253cdcf306a2..971c37ceecb4 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -181,9 +181,11 @@ void sas_deform_port(struct asd_sas_phy *phy)
181 181
182/* ---------- SAS port events ---------- */ 182/* ---------- SAS port events ---------- */
183 183
184void sas_porte_bytes_dmaed(void *data) 184void sas_porte_bytes_dmaed(struct work_struct *work)
185{ 185{
186 struct asd_sas_phy *phy = data; 186 struct asd_sas_event *ev =
187 container_of(work, struct asd_sas_event, work);
188 struct asd_sas_phy *phy = ev->phy;
187 189
188 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, 190 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
189 &phy->port_events_pending); 191 &phy->port_events_pending);
@@ -191,11 +193,13 @@ void sas_porte_bytes_dmaed(void *data)
191 sas_form_port(phy); 193 sas_form_port(phy);
192} 194}
193 195
194void sas_porte_broadcast_rcvd(void *data) 196void sas_porte_broadcast_rcvd(struct work_struct *work)
195{ 197{
198 struct asd_sas_event *ev =
199 container_of(work, struct asd_sas_event, work);
200 struct asd_sas_phy *phy = ev->phy;
196 unsigned long flags; 201 unsigned long flags;
197 u32 prim; 202 u32 prim;
198 struct asd_sas_phy *phy = data;
199 203
200 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, 204 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
201 &phy->port_events_pending); 205 &phy->port_events_pending);
@@ -208,9 +212,11 @@ void sas_porte_broadcast_rcvd(void *data)
208 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); 212 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
209} 213}
210 214
211void sas_porte_link_reset_err(void *data) 215void sas_porte_link_reset_err(struct work_struct *work)
212{ 216{
213 struct asd_sas_phy *phy = data; 217 struct asd_sas_event *ev =
218 container_of(work, struct asd_sas_event, work);
219 struct asd_sas_phy *phy = ev->phy;
214 220
215 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, 221 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
216 &phy->port_events_pending); 222 &phy->port_events_pending);
@@ -218,9 +224,11 @@ void sas_porte_link_reset_err(void *data)
218 sas_deform_port(phy); 224 sas_deform_port(phy);
219} 225}
220 226
221void sas_porte_timer_event(void *data) 227void sas_porte_timer_event(struct work_struct *work)
222{ 228{
223 struct asd_sas_phy *phy = data; 229 struct asd_sas_event *ev =
230 container_of(work, struct asd_sas_event, work);
231 struct asd_sas_phy *phy = ev->phy;
224 232
225 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, 233 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
226 &phy->port_events_pending); 234 &phy->port_events_pending);
@@ -228,9 +236,11 @@ void sas_porte_timer_event(void *data)
228 sas_deform_port(phy); 236 sas_deform_port(phy);
229} 237}
230 238
231void sas_porte_hard_reset(void *data) 239void sas_porte_hard_reset(struct work_struct *work)
232{ 240{
233 struct asd_sas_phy *phy = data; 241 struct asd_sas_event *ev =
242 container_of(work, struct asd_sas_event, work);
243 struct asd_sas_phy *phy = ev->phy;
234 244
235 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, 245 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
236 &phy->port_events_pending); 246 &phy->port_events_pending);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index e46e79355b77..22672d54aa27 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -29,9 +29,11 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi.h> 31#include <scsi/scsi.h>
32#include <scsi/scsi_eh.h>
32#include <scsi/scsi_transport.h> 33#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h> 34#include <scsi/scsi_transport_sas.h>
34#include "../scsi_sas_internal.h" 35#include "../scsi_sas_internal.h"
36#include "../scsi_transport_api.h"
35 37
36#include <linux/err.h> 38#include <linux/err.h>
37#include <linux/blkdev.h> 39#include <linux/blkdev.h>
@@ -46,6 +48,7 @@ static void sas_scsi_task_done(struct sas_task *task)
46{ 48{
47 struct task_status_struct *ts = &task->task_status; 49 struct task_status_struct *ts = &task->task_status;
48 struct scsi_cmnd *sc = task->uldd_task; 50 struct scsi_cmnd *sc = task->uldd_task;
51 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
49 unsigned ts_flags = task->task_state_flags; 52 unsigned ts_flags = task->task_state_flags;
50 int hs = 0, stat = 0; 53 int hs = 0, stat = 0;
51 54
@@ -116,7 +119,7 @@ static void sas_scsi_task_done(struct sas_task *task)
116 sas_free_task(task); 119 sas_free_task(task);
117 /* This is very ugly but this is how SCSI Core works. */ 120 /* This is very ugly but this is how SCSI Core works. */
118 if (ts_flags & SAS_TASK_STATE_ABORTED) 121 if (ts_flags & SAS_TASK_STATE_ABORTED)
119 scsi_finish_command(sc); 122 scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
120 else 123 else
121 sc->scsi_done(sc); 124 sc->scsi_done(sc);
122} 125}
@@ -307,6 +310,15 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
307 spin_unlock_irqrestore(&core->task_queue_lock, flags); 310 spin_unlock_irqrestore(&core->task_queue_lock, flags);
308 } 311 }
309 312
313 spin_lock_irqsave(&task->task_state_lock, flags);
314 if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
315 spin_unlock_irqrestore(&task->task_state_lock, flags);
316 SAS_DPRINTK("%s: task 0x%p already aborted\n",
317 __FUNCTION__, task);
318 return TASK_IS_ABORTED;
319 }
320 spin_unlock_irqrestore(&task->task_state_lock, flags);
321
310 for (i = 0; i < 5; i++) { 322 for (i = 0; i < 5; i++) {
311 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); 323 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
312 res = si->dft->lldd_abort_task(task); 324 res = si->dft->lldd_abort_task(task);
@@ -409,13 +421,16 @@ Again:
409 SAS_DPRINTK("going over list...\n"); 421 SAS_DPRINTK("going over list...\n");
410 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { 422 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
411 struct sas_task *task = TO_SAS_TASK(cmd); 423 struct sas_task *task = TO_SAS_TASK(cmd);
424 list_del_init(&cmd->eh_entry);
412 425
426 if (!task) {
427 SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__);
428 continue;
429 }
413 SAS_DPRINTK("trying to find task 0x%p\n", task); 430 SAS_DPRINTK("trying to find task 0x%p\n", task);
414 list_del_init(&cmd->eh_entry);
415 res = sas_scsi_find_task(task); 431 res = sas_scsi_find_task(task);
416 432
417 cmd->eh_eflags = 0; 433 cmd->eh_eflags = 0;
418 shost->host_failed--;
419 434
420 switch (res) { 435 switch (res) {
421 case TASK_IS_DONE: 436 case TASK_IS_DONE:
@@ -491,6 +506,7 @@ Again:
491 } 506 }
492 } 507 }
493out: 508out:
509 scsi_eh_flush_done_q(&ha->eh_done_q);
494 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); 510 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
495 return; 511 return;
496clear_q: 512clear_q:
@@ -508,12 +524,18 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
508 unsigned long flags; 524 unsigned long flags;
509 525
510 if (!task) { 526 if (!task) {
511 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 527 SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n",
512 cmd, task); 528 cmd, task);
513 return EH_HANDLED; 529 return EH_HANDLED;
514 } 530 }
515 531
516 spin_lock_irqsave(&task->task_state_lock, flags); 532 spin_lock_irqsave(&task->task_state_lock, flags);
533 if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
534 spin_unlock_irqrestore(&task->task_state_lock, flags);
535 SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: "
536 "EH_NOT_HANDLED\n", cmd, task);
537 return EH_NOT_HANDLED;
538 }
517 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 539 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
518 spin_unlock_irqrestore(&task->task_state_lock, flags); 540 spin_unlock_irqrestore(&task->task_state_lock, flags);
519 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 541 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
@@ -777,6 +799,66 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
777 spin_unlock_irqrestore(&core->task_queue_lock, flags); 799 spin_unlock_irqrestore(&core->task_queue_lock, flags);
778} 800}
779 801
802static int do_sas_task_abort(struct sas_task *task)
803{
804 struct scsi_cmnd *sc = task->uldd_task;
805 struct sas_internal *si =
806 to_sas_internal(task->dev->port->ha->core.shost->transportt);
807 unsigned long flags;
808 int res;
809
810 spin_lock_irqsave(&task->task_state_lock, flags);
811 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
812 spin_unlock_irqrestore(&task->task_state_lock, flags);
813 SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__,
814 task);
815 return 0;
816 }
817
818 task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED;
819 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
820 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
821 spin_unlock_irqrestore(&task->task_state_lock, flags);
822
823 if (!si->dft->lldd_abort_task)
824 return -ENODEV;
825
826 res = si->dft->lldd_abort_task(task);
827 if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
828 (res == TMF_RESP_FUNC_COMPLETE))
829 {
830 /* SMP commands don't have scsi_cmds(?) */
831 if (!sc) {
832 task->task_done(task);
833 return 0;
834 }
835 scsi_req_abort_cmd(sc);
836 scsi_schedule_eh(sc->device->host);
837 return 0;
838 }
839
840 spin_lock_irqsave(&task->task_state_lock, flags);
841 task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED;
842 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
843 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
844 spin_unlock_irqrestore(&task->task_state_lock, flags);
845
846 return -EAGAIN;
847}
848
849void sas_task_abort(struct work_struct *work)
850{
851 struct sas_task *task =
852 container_of(work, struct sas_task, abort_work);
853 int i;
854
855 for (i = 0; i < 5; i++)
856 if (!do_sas_task_abort(task))
857 return;
858
859 SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__);
860}
861
780EXPORT_SYMBOL_GPL(sas_queuecommand); 862EXPORT_SYMBOL_GPL(sas_queuecommand);
781EXPORT_SYMBOL_GPL(sas_target_alloc); 863EXPORT_SYMBOL_GPL(sas_target_alloc);
782EXPORT_SYMBOL_GPL(sas_slave_configure); 864EXPORT_SYMBOL_GPL(sas_slave_configure);
@@ -784,3 +866,5 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy);
784EXPORT_SYMBOL_GPL(sas_change_queue_depth); 866EXPORT_SYMBOL_GPL(sas_change_queue_depth);
785EXPORT_SYMBOL_GPL(sas_change_queue_type); 867EXPORT_SYMBOL_GPL(sas_change_queue_type);
786EXPORT_SYMBOL_GPL(sas_bios_param); 868EXPORT_SYMBOL_GPL(sas_bios_param);
869EXPORT_SYMBOL_GPL(sas_task_abort);
870EXPORT_SYMBOL_GPL(sas_phy_reset);
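
sas_task_abort() is now a work handler that finds its task through container_of(), so an LLDD can defer an abort to process context by scheduling the task's abort_work. A hedged sketch of how a driver might kick that off; where abort_work gets its INIT_WORK() is an assumption here, since that setup is not shown in this hunk:

#include <linux/workqueue.h>
#include <scsi/libsas.h>

/* Hypothetical LLDD hook: defer an abort to process context.  Assumes the
 * task's abort_work was initialized elsewhere with
 * INIT_WORK(&task->abort_work, sas_task_abort). */
static void my_lldd_request_abort(struct sas_task *task)
{
        schedule_work(&task->abort_work);
}
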
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
new file mode 100644
index 000000000000..89403b00e042
--- /dev/null
+++ b/drivers/scsi/libsrp.c
@@ -0,0 +1,441 @@
1/*
2 * SCSI RDMA Protocol lib functions
3 *
4 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 */
21#include <linux/err.h>
22#include <linux/kfifo.h>
23#include <linux/scatterlist.h>
24#include <linux/dma-mapping.h>
25#include <linux/pci.h>
26#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_tcq.h>
29#include <scsi/scsi_tgt.h>
30#include <scsi/srp.h>
31#include <scsi/libsrp.h>
32
33enum srp_task_attributes {
34 SRP_SIMPLE_TASK = 0,
35 SRP_HEAD_TASK = 1,
36 SRP_ORDERED_TASK = 2,
37 SRP_ACA_TASK = 4
38};
39
40/* tmp - will replace with SCSI logging stuff */
41#define eprintk(fmt, args...) \
42do { \
43 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
44} while (0)
45/* #define dprintk eprintk */
46#define dprintk(fmt, args...)
47
48static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
49 struct srp_buf **ring)
50{
51 int i;
52 struct iu_entry *iue;
53
54 q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
55 if (!q->pool)
56 return -ENOMEM;
57 q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
58 if (!q->items)
59 goto free_pool;
60
61 spin_lock_init(&q->lock);
62 q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
63 GFP_KERNEL, &q->lock);
64 if (IS_ERR(q->queue))
65 goto free_item;
66
67 for (i = 0, iue = q->items; i < max; i++) {
68 __kfifo_put(q->queue, (void *) &iue, sizeof(void *));
69 iue->sbuf = ring[i];
70 iue++;
71 }
72 return 0;
73
74free_item:
75 kfree(q->items);
76free_pool:
77 kfree(q->pool);
78 return -ENOMEM;
79}
80
81static void srp_iu_pool_free(struct srp_queue *q)
82{
83 kfree(q->items);
84 kfree(q->pool);
85}
86
87static struct srp_buf **srp_ring_alloc(struct device *dev,
88 size_t max, size_t size)
89{
90 int i;
91 struct srp_buf **ring;
92
93 ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
94 if (!ring)
95 return NULL;
96
97 for (i = 0; i < max; i++) {
98 ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL);
99 if (!ring[i])
100 goto out;
101 ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
102 GFP_KERNEL);
103 if (!ring[i]->buf)
104 goto out;
105 }
106 return ring;
107
108out:
109 for (i = 0; i < max && ring[i]; i++) {
110 if (ring[i]->buf)
111 dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
112 kfree(ring[i]);
113 }
114 kfree(ring);
115
116 return NULL;
117}
118
119static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
120 size_t size)
121{
122 int i;
123
124 for (i = 0; i < max; i++) {
125 dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
126 kfree(ring[i]);
127 }
128}
129
130int srp_target_alloc(struct srp_target *target, struct device *dev,
131 size_t nr, size_t iu_size)
132{
133 int err;
134
135 spin_lock_init(&target->lock);
136 INIT_LIST_HEAD(&target->cmd_queue);
137
138 target->dev = dev;
139 target->dev->driver_data = target;
140
141 target->srp_iu_size = iu_size;
142 target->rx_ring_size = nr;
143 target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
144 if (!target->rx_ring)
145 return -ENOMEM;
146 err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
147 if (err)
148 goto free_ring;
149
150 return 0;
151
152free_ring:
153 srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
154 return -ENOMEM;
155}
156EXPORT_SYMBOL_GPL(srp_target_alloc);
157
158void srp_target_free(struct srp_target *target)
159{
160 srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
161 target->srp_iu_size);
162 srp_iu_pool_free(&target->iu_queue);
163}
164EXPORT_SYMBOL_GPL(srp_target_free);
165
166struct iu_entry *srp_iu_get(struct srp_target *target)
167{
168 struct iu_entry *iue = NULL;
169
170 kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
171 if (!iue)
172 return iue;
173 iue->target = target;
174 INIT_LIST_HEAD(&iue->ilist);
175 iue->flags = 0;
176 return iue;
177}
178EXPORT_SYMBOL_GPL(srp_iu_get);
179
180void srp_iu_put(struct iu_entry *iue)
181{
182 kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
183}
184EXPORT_SYMBOL_GPL(srp_iu_put);
185
186static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
187 enum dma_data_direction dir, srp_rdma_t rdma_io,
188 int dma_map, int ext_desc)
189{
190 struct iu_entry *iue = NULL;
191 struct scatterlist *sg = NULL;
192 int err, nsg = 0, len;
193
194 if (dma_map) {
195 iue = (struct iu_entry *) sc->SCp.ptr;
196 sg = sc->request_buffer;
197
198 dprintk("%p %u %u %d\n", iue, sc->request_bufflen,
199 md->len, sc->use_sg);
200
201 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg,
202 DMA_BIDIRECTIONAL);
203 if (!nsg) {
204 printk("fail to map %p %d\n", iue, sc->use_sg);
205 return 0;
206 }
207 len = min(sc->request_bufflen, md->len);
208 } else
209 len = md->len;
210
211 err = rdma_io(sc, sg, nsg, md, 1, dir, len);
212
213 if (dma_map)
214 dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
215
216 return err;
217}
218
219static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
220 struct srp_indirect_buf *id,
221 enum dma_data_direction dir, srp_rdma_t rdma_io,
222 int dma_map, int ext_desc)
223{
224 struct iu_entry *iue = NULL;
225 struct srp_direct_buf *md = NULL;
226 struct scatterlist dummy, *sg = NULL;
227 dma_addr_t token = 0;
228 long err;
229 unsigned int done = 0;
230 int nmd, nsg = 0, len;
231
232 if (dma_map || ext_desc) {
233 iue = (struct iu_entry *) sc->SCp.ptr;
234 sg = sc->request_buffer;
235
236 dprintk("%p %u %u %d %d\n",
237 iue, sc->request_bufflen, id->len,
238 cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
239 }
240
241 nmd = id->table_desc.len / sizeof(struct srp_direct_buf);
242
243 if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
244 (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
245 md = &id->desc_list[0];
246 goto rdma;
247 }
248
249 if (ext_desc && dma_map) {
250 md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
251 &token, GFP_KERNEL);
252 if (!md) {
253 eprintk("Can't get dma memory %u\n", id->table_desc.len);
254 return -ENOMEM;
255 }
256
257 sg_init_one(&dummy, md, id->table_desc.len);
258 sg_dma_address(&dummy) = token;
259 err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
260 id->table_desc.len);
261 if (err < 0) {
262 eprintk("Error copying indirect table %ld\n", err);
263 goto free_mem;
264 }
265 } else {
266 eprintk("This command uses external indirect buffer\n");
267 return -EINVAL;
268 }
269
270rdma:
271 if (dma_map) {
272 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
273 if (!nsg) {
274 eprintk("fail to map %p %d\n", iue, sc->use_sg);
275 goto free_mem;
276 }
277 len = min(sc->request_bufflen, id->len);
278 } else
279 len = id->len;
280
281 err = rdma_io(sc, sg, nsg, md, nmd, dir, len);
282
283 if (dma_map)
284 dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
285
286free_mem:
287 if (token && dma_map)
288 dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
289
290 return done;
291}
292
293static int data_out_desc_size(struct srp_cmd *cmd)
294{
295 int size = 0;
296 u8 fmt = cmd->buf_fmt >> 4;
297
298 switch (fmt) {
299 case SRP_NO_DATA_DESC:
300 break;
301 case SRP_DATA_DESC_DIRECT:
302 size = sizeof(struct srp_direct_buf);
303 break;
304 case SRP_DATA_DESC_INDIRECT:
305 size = sizeof(struct srp_indirect_buf) +
306 sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
307 break;
308 default:
309 eprintk("client error. Invalid data_out_format %x\n", fmt);
310 break;
311 }
312 return size;
313}
314
315/*
316 * TODO: this can be called multiple times for a single command if it
317 * has very long data.
318 */
319int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
320 srp_rdma_t rdma_io, int dma_map, int ext_desc)
321{
322 struct srp_direct_buf *md;
323 struct srp_indirect_buf *id;
324 enum dma_data_direction dir;
325 int offset, err = 0;
326 u8 format;
327
328 offset = cmd->add_cdb_len * 4;
329
330 dir = srp_cmd_direction(cmd);
331 if (dir == DMA_FROM_DEVICE)
332 offset += data_out_desc_size(cmd);
333
334 if (dir == DMA_TO_DEVICE)
335 format = cmd->buf_fmt >> 4;
336 else
337 format = cmd->buf_fmt & ((1U << 4) - 1);
338
339 switch (format) {
340 case SRP_NO_DATA_DESC:
341 break;
342 case SRP_DATA_DESC_DIRECT:
343 md = (struct srp_direct_buf *)
344 (cmd->add_data + offset);
345 err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc);
346 break;
347 case SRP_DATA_DESC_INDIRECT:
348 id = (struct srp_indirect_buf *)
349 (cmd->add_data + offset);
350 err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map,
351 ext_desc);
352 break;
353 default:
354 eprintk("Unknown format %d %x\n", dir, format);
355 break;
356 }
357
358 return err;
359}
360EXPORT_SYMBOL_GPL(srp_transfer_data);
361
362static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
363{
364 struct srp_direct_buf *md;
365 struct srp_indirect_buf *id;
366 int len = 0, offset = cmd->add_cdb_len * 4;
367 u8 fmt;
368
369 if (dir == DMA_TO_DEVICE)
370 fmt = cmd->buf_fmt >> 4;
371 else {
372 fmt = cmd->buf_fmt & ((1U << 4) - 1);
373 offset += data_out_desc_size(cmd);
374 }
375
376 switch (fmt) {
377 case SRP_NO_DATA_DESC:
378 break;
379 case SRP_DATA_DESC_DIRECT:
380 md = (struct srp_direct_buf *) (cmd->add_data + offset);
381 len = md->len;
382 break;
383 case SRP_DATA_DESC_INDIRECT:
384 id = (struct srp_indirect_buf *) (cmd->add_data + offset);
385 len = id->len;
386 break;
387 default:
388 eprintk("invalid data format %x\n", fmt);
389 break;
390 }
391 return len;
392}
393
394int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
395 u64 addr)
396{
397 enum dma_data_direction dir;
398 struct scsi_cmnd *sc;
399 int tag, len, err;
400
401 switch (cmd->task_attr) {
402 case SRP_SIMPLE_TASK:
403 tag = MSG_SIMPLE_TAG;
404 break;
405 case SRP_ORDERED_TASK:
406 tag = MSG_ORDERED_TAG;
407 break;
408 case SRP_HEAD_TASK:
409 tag = MSG_HEAD_TAG;
410 break;
411 default:
412 eprintk("Task attribute %d not supported\n", cmd->task_attr);
413 tag = MSG_ORDERED_TAG;
414 }
415
416 dir = srp_cmd_direction(cmd);
417 len = vscsis_data_length(cmd, dir);
418
419 dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0],
420 cmd->lun, dir, len, tag, (unsigned long long) cmd->tag);
421
422 sc = scsi_host_get_command(shost, dir, GFP_KERNEL);
423 if (!sc)
424 return -ENOMEM;
425
426 sc->SCp.ptr = info;
427 memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
428 sc->request_bufflen = len;
429 sc->request_buffer = (void *) (unsigned long) addr;
430 sc->tag = tag;
431 err = scsi_tgt_queue_command(sc, (struct scsi_lun *) &cmd->lun, cmd->tag);
432 if (err)
433 scsi_host_put_command(shost, sc);
434
435 return err;
436}
437EXPORT_SYMBOL_GPL(srp_cmd_queue);
438
439MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
440MODULE_AUTHOR("FUJITA Tomonori");
441MODULE_LICENSE("GPL");
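
libsrp keeps its receive IUs in a kfifo-backed pool: srp_target_alloc() builds the DMA ring and pool, srp_iu_get() checks an iu_entry out, srp_iu_put() returns it, and srp_target_free() tears everything down. A rough usage sketch from a hypothetical target driver; the ring depth and IU size are assumed values:

#include <scsi/libsrp.h>

/* Rough lifecycle sketch for a hypothetical target driver; the ring depth
 * and IU size are assumed, and real code keeps the target around instead
 * of freeing it immediately. */
static int my_target_init(struct srp_target *target, struct device *dev)
{
        struct iu_entry *iue;
        int err;

        err = srp_target_alloc(target, dev, 64 /* IUs */, 256 /* bytes each */);
        if (err)
                return err;

        iue = srp_iu_get(target);       /* borrow a receive IU from the pool */
        if (iue)
                srp_iu_put(iue);        /* normally done at command completion */

        srp_target_free(target);
        return 0;
}
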
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3f7f5f8abd75..a7de0bca5bdd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -296,13 +296,17 @@ struct lpfc_hba {
296 uint32_t cfg_cr_delay; 296 uint32_t cfg_cr_delay;
297 uint32_t cfg_cr_count; 297 uint32_t cfg_cr_count;
298 uint32_t cfg_multi_ring_support; 298 uint32_t cfg_multi_ring_support;
299 uint32_t cfg_multi_ring_rctl;
300 uint32_t cfg_multi_ring_type;
299 uint32_t cfg_fdmi_on; 301 uint32_t cfg_fdmi_on;
300 uint32_t cfg_discovery_threads; 302 uint32_t cfg_discovery_threads;
301 uint32_t cfg_max_luns; 303 uint32_t cfg_max_luns;
302 uint32_t cfg_poll; 304 uint32_t cfg_poll;
303 uint32_t cfg_poll_tmo; 305 uint32_t cfg_poll_tmo;
306 uint32_t cfg_use_msi;
304 uint32_t cfg_sg_seg_cnt; 307 uint32_t cfg_sg_seg_cnt;
305 uint32_t cfg_sg_dma_buf_size; 308 uint32_t cfg_sg_dma_buf_size;
309 uint64_t cfg_soft_wwnn;
306 uint64_t cfg_soft_wwpn; 310 uint64_t cfg_soft_wwpn;
307 311
308 uint32_t dev_loss_tmo_changed; 312 uint32_t dev_loss_tmo_changed;
@@ -355,7 +359,7 @@ struct lpfc_hba {
355#define VPD_PORT 0x8 /* valid vpd port data */ 359#define VPD_PORT 0x8 /* valid vpd port data */
356#define VPD_MASK 0xf /* mask for any vpd data */ 360#define VPD_MASK 0xf /* mask for any vpd data */
357 361
358 uint8_t soft_wwpn_enable; 362 uint8_t soft_wwn_enable;
359 363
360 struct timer_list fcp_poll_timer; 364 struct timer_list fcp_poll_timer;
361 struct timer_list els_tmofunc; 365 struct timer_list els_tmofunc;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2a4e02e7a392..f247e786af99 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -552,10 +552,10 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
552static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 552static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
553 553
554 554
555static char *lpfc_soft_wwpn_key = "C99G71SL8032A"; 555static char *lpfc_soft_wwn_key = "C99G71SL8032A";
556 556
557static ssize_t 557static ssize_t
558lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf, 558lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
559 size_t count) 559 size_t count)
560{ 560{
561 struct Scsi_Host *host = class_to_shost(cdev); 561 struct Scsi_Host *host = class_to_shost(cdev);
@@ -579,15 +579,15 @@ lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
579 if (buf[cnt-1] == '\n') 579 if (buf[cnt-1] == '\n')
580 cnt--; 580 cnt--;
581 581
582 if ((cnt != strlen(lpfc_soft_wwpn_key)) || 582 if ((cnt != strlen(lpfc_soft_wwn_key)) ||
583 (strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0)) 583 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
584 return -EINVAL; 584 return -EINVAL;
585 585
586 phba->soft_wwpn_enable = 1; 586 phba->soft_wwn_enable = 1;
587 return count; 587 return count;
588} 588}
589static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL, 589static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
590 lpfc_soft_wwpn_enable_store); 590 lpfc_soft_wwn_enable_store);
591 591
592static ssize_t 592static ssize_t
593lpfc_soft_wwpn_show(struct class_device *cdev, char *buf) 593lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
@@ -613,12 +613,12 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
613 if (buf[cnt-1] == '\n') 613 if (buf[cnt-1] == '\n')
614 cnt--; 614 cnt--;
615 615
616 if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) || 616 if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
617 ((cnt == 17) && (*buf++ != 'x')) || 617 ((cnt == 17) && (*buf++ != 'x')) ||
618 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) 618 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
619 return -EINVAL; 619 return -EINVAL;
620 620
621 phba->soft_wwpn_enable = 0; 621 phba->soft_wwn_enable = 0;
622 622
623 memset(wwpn, 0, sizeof(wwpn)); 623 memset(wwpn, 0, sizeof(wwpn));
624 624
@@ -639,6 +639,8 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
639 } 639 }
640 phba->cfg_soft_wwpn = wwn_to_u64(wwpn); 640 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
641 fc_host_port_name(host) = phba->cfg_soft_wwpn; 641 fc_host_port_name(host) = phba->cfg_soft_wwpn;
642 if (phba->cfg_soft_wwnn)
643 fc_host_node_name(host) = phba->cfg_soft_wwnn;
642 644
643 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 645 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
644 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); 646 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@@ -664,6 +666,66 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
664static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ 666static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
665 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); 667 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
666 668
669static ssize_t
670lpfc_soft_wwnn_show(struct class_device *cdev, char *buf)
671{
672 struct Scsi_Host *host = class_to_shost(cdev);
673 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
674 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
675 (unsigned long long)phba->cfg_soft_wwnn);
676}
677
678
679static ssize_t
680lpfc_soft_wwnn_store(struct class_device *cdev, const char *buf, size_t count)
681{
682 struct Scsi_Host *host = class_to_shost(cdev);
683 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
684 unsigned int i, j, cnt=count;
685 u8 wwnn[8];
686
687 /* count may include a LF at end of string */
688 if (buf[cnt-1] == '\n')
689 cnt--;
690
691 if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
692 ((cnt == 17) && (*buf++ != 'x')) ||
693 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
694 return -EINVAL;
695
696 /*
697 * Allow wwnn to be set many times, as long as the enable is set.
698 * However, once the wwpn is set, everything locks.
699 */
700
701 memset(wwnn, 0, sizeof(wwnn));
702
703 /* Validate and store the new name */
704 for (i=0, j=0; i < 16; i++) {
705 if ((*buf >= 'a') && (*buf <= 'f'))
706 j = ((j << 4) | ((*buf++ -'a') + 10));
707 else if ((*buf >= 'A') && (*buf <= 'F'))
708 j = ((j << 4) | ((*buf++ -'A') + 10));
709 else if ((*buf >= '0') && (*buf <= '9'))
710 j = ((j << 4) | (*buf++ -'0'));
711 else
712 return -EINVAL;
713 if (i % 2) {
714 wwnn[i/2] = j & 0xff;
715 j = 0;
716 }
717 }
718 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
719
720 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
721 "lpfc%d: soft_wwnn set. Value will take effect upon "
722 "setting of the soft_wwpn\n", phba->brd_no);
723
724 return count;
725}
726static CLASS_DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
727 lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
728
667 729
668static int lpfc_poll = 0; 730static int lpfc_poll = 0;
669module_param(lpfc_poll, int, 0); 731module_param(lpfc_poll, int, 0);
@@ -802,12 +864,11 @@ static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
802# LOG_MBOX 0x4 Mailbox events 864# LOG_MBOX 0x4 Mailbox events
803# LOG_INIT 0x8 Initialization events 865# LOG_INIT 0x8 Initialization events
804# LOG_LINK_EVENT 0x10 Link events 866# LOG_LINK_EVENT 0x10 Link events
805# LOG_IP 0x20 IP traffic history
806# LOG_FCP 0x40 FCP traffic history 867# LOG_FCP 0x40 FCP traffic history
807# LOG_NODE 0x80 Node table events 868# LOG_NODE 0x80 Node table events
808# LOG_MISC 0x400 Miscellaneous events 869# LOG_MISC 0x400 Miscellaneous events
809# LOG_SLI 0x800 SLI events 870# LOG_SLI 0x800 SLI events
810# LOG_CHK_COND 0x1000 FCP Check condition flag 871# LOG_FCP_ERROR 0x1000 Only log FCP errors
811# LOG_LIBDFC 0x2000 LIBDFC events 872# LOG_LIBDFC 0x2000 LIBDFC events
812# LOG_ALL_MSG 0xffff LOG all messages 873# LOG_ALL_MSG 0xffff LOG all messages
813*/ 874*/
@@ -916,6 +977,22 @@ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
916 "SLI rings to spread IOCB entries across"); 977 "SLI rings to spread IOCB entries across");
917 978
918/* 979/*
980# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
981# identifies what rctl value to configure the additional ring for.
 982# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
983*/
984LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
985 255, "Identifies RCTL for additional ring configuration");
986
987/*
988# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
989# identifies what type value to configure the additional ring for.
990# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
991*/
992LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
993 255, "Identifies TYPE for additional ring configuration");
994
995/*
919# lpfc_fdmi_on: controls FDMI support. 996# lpfc_fdmi_on: controls FDMI support.
920# 0 = no FDMI support 997# 0 = no FDMI support
921# 1 = support FDMI without attribute of hostname 998# 1 = support FDMI without attribute of hostname
@@ -946,6 +1023,15 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535,
946LPFC_ATTR_RW(poll_tmo, 10, 1, 255, 1023LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
947 "Milliseconds driver will wait between polling FCP ring"); 1024 "Milliseconds driver will wait between polling FCP ring");
948 1025
1026/*
1027# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 1028# support this feature.
1029# 0 = MSI disabled (default)
1030# 1 = MSI enabled
1031# Value range is [0,1]. Default value is 0.
1032*/
1033LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
1034
949 1035
950struct class_device_attribute *lpfc_host_attrs[] = { 1036struct class_device_attribute *lpfc_host_attrs[] = {
951 &class_device_attr_info, 1037 &class_device_attr_info,
@@ -974,6 +1060,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
974 &class_device_attr_lpfc_cr_delay, 1060 &class_device_attr_lpfc_cr_delay,
975 &class_device_attr_lpfc_cr_count, 1061 &class_device_attr_lpfc_cr_count,
976 &class_device_attr_lpfc_multi_ring_support, 1062 &class_device_attr_lpfc_multi_ring_support,
1063 &class_device_attr_lpfc_multi_ring_rctl,
1064 &class_device_attr_lpfc_multi_ring_type,
977 &class_device_attr_lpfc_fdmi_on, 1065 &class_device_attr_lpfc_fdmi_on,
978 &class_device_attr_lpfc_max_luns, 1066 &class_device_attr_lpfc_max_luns,
979 &class_device_attr_nport_evt_cnt, 1067 &class_device_attr_nport_evt_cnt,
@@ -982,8 +1070,10 @@ struct class_device_attribute *lpfc_host_attrs[] = {
982 &class_device_attr_issue_reset, 1070 &class_device_attr_issue_reset,
983 &class_device_attr_lpfc_poll, 1071 &class_device_attr_lpfc_poll,
984 &class_device_attr_lpfc_poll_tmo, 1072 &class_device_attr_lpfc_poll_tmo,
1073 &class_device_attr_lpfc_use_msi,
1074 &class_device_attr_lpfc_soft_wwnn,
985 &class_device_attr_lpfc_soft_wwpn, 1075 &class_device_attr_lpfc_soft_wwpn,
986 &class_device_attr_lpfc_soft_wwpn_enable, 1076 &class_device_attr_lpfc_soft_wwn_enable,
987 NULL, 1077 NULL,
988}; 1078};
989 1079
@@ -1771,6 +1861,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1771 lpfc_cr_delay_init(phba, lpfc_cr_delay); 1861 lpfc_cr_delay_init(phba, lpfc_cr_delay);
1772 lpfc_cr_count_init(phba, lpfc_cr_count); 1862 lpfc_cr_count_init(phba, lpfc_cr_count);
1773 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); 1863 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
1864 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
1865 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
1774 lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth); 1866 lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth);
1775 lpfc_fcp_class_init(phba, lpfc_fcp_class); 1867 lpfc_fcp_class_init(phba, lpfc_fcp_class);
1776 lpfc_use_adisc_init(phba, lpfc_use_adisc); 1868 lpfc_use_adisc_init(phba, lpfc_use_adisc);
@@ -1782,9 +1874,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1782 lpfc_discovery_threads_init(phba, lpfc_discovery_threads); 1874 lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
1783 lpfc_max_luns_init(phba, lpfc_max_luns); 1875 lpfc_max_luns_init(phba, lpfc_max_luns);
1784 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 1876 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
1877 lpfc_use_msi_init(phba, lpfc_use_msi);
1785 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); 1878 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
1786 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); 1879 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
1787 phba->cfg_poll = lpfc_poll; 1880 phba->cfg_poll = lpfc_poll;
1881 phba->cfg_soft_wwnn = 0L;
1788 phba->cfg_soft_wwpn = 0L; 1882 phba->cfg_soft_wwpn = 0L;
1789 1883
1790 /* 1884 /*
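
The attribute hunks above gate the new soft WWNN behind the same "soft_wwn" enable key as the existing soft WWPN, and both store routines fold a 16-digit hex string (optionally prefixed with "x" or "0x") into an 8-byte world-wide name two nibbles at a time. Below is a minimal standalone C sketch of that conversion for readers skimming the patch; parse_wwn() and the sample value are invented for illustration and are not part of the driver.

    /* Standalone sketch of the nibble folding performed by the new
     * lpfc_soft_wwnn_store()/lpfc_soft_wwpn_store(); illustrative only. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int parse_wwn(const char *buf, uint8_t wwn[8])
    {
        unsigned int i, j = 0;

        if (!strncmp(buf, "0x", 2))
            buf += 2;                      /* 18-character form */
        else if (*buf == 'x')
            buf++;                         /* 17-character form */

        for (i = 0; i < 16; i++) {
            char c = *buf++;

            if (c >= 'a' && c <= 'f')
                j = (j << 4) | (unsigned int)(c - 'a' + 10);
            else if (c >= 'A' && c <= 'F')
                j = (j << 4) | (unsigned int)(c - 'A' + 10);
            else if (c >= '0' && c <= '9')
                j = (j << 4) | (unsigned int)(c - '0');
            else
                return -1;                 /* non-hex input is rejected, as in the driver */
            if (i % 2) {                   /* every second nibble completes one byte */
                wwn[i / 2] = (uint8_t)(j & 0xff);
                j = 0;
            }
        }
        return 0;
    }

    int main(void)
    {
        uint8_t wwn[8];
        unsigned int i;

        if (parse_wwn("0x20000000c9abcdef", wwn) == 0) {
            for (i = 0; i < 8; i++)
                printf("%02x", wwn[i]);
            printf("\n");
        }
        return 0;
    }

Writing the enable key first and the hex string second mirrors the sysfs flow the patch implements: the WWNN may be rewritten while the enable flag is set, and everything locks once the WWPN is stored.
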
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 3add7c237859..a51a41b7f15d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -558,6 +558,14 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
558 return; 558 return;
559} 559}
560 560
561static void
562lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
563 struct lpfc_iocbq * rspiocb)
564{
565 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
566 return;
567}
568
561void 569void
562lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) 570lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
563{ 571{
@@ -629,6 +637,8 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
629 bpl->tus.f.bdeSize = RNN_REQUEST_SZ; 637 bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
630 else if (cmdcode == SLI_CTNS_RSNN_NN) 638 else if (cmdcode == SLI_CTNS_RSNN_NN)
631 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; 639 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
640 else if (cmdcode == SLI_CTNS_RFF_ID)
641 bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
632 else 642 else
633 bpl->tus.f.bdeSize = 0; 643 bpl->tus.f.bdeSize = 0;
634 bpl->tus.w = le32_to_cpu(bpl->tus.w); 644 bpl->tus.w = le32_to_cpu(bpl->tus.w);
@@ -660,6 +670,17 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
660 cmpl = lpfc_cmpl_ct_cmd_rft_id; 670 cmpl = lpfc_cmpl_ct_cmd_rft_id;
661 break; 671 break;
662 672
673 case SLI_CTNS_RFF_ID:
674 CtReq->CommandResponse.bits.CmdRsp =
675 be16_to_cpu(SLI_CTNS_RFF_ID);
676 CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
677 CtReq->un.rff.feature_res = 0;
678 CtReq->un.rff.feature_tgt = 0;
679 CtReq->un.rff.type_code = FC_FCP_DATA;
680 CtReq->un.rff.feature_init = 1;
681 cmpl = lpfc_cmpl_ct_cmd_rff_id;
682 break;
683
663 case SLI_CTNS_RNN_ID: 684 case SLI_CTNS_RNN_ID:
664 CtReq->CommandResponse.bits.CmdRsp = 685 CtReq->CommandResponse.bits.CmdRsp =
665 be16_to_cpu(SLI_CTNS_RNN_ID); 686 be16_to_cpu(SLI_CTNS_RNN_ID);
@@ -934,7 +955,8 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
934 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); 955 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
935 ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION); 956 ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
936 sprintf(ae->un.OsNameVersion, "%s %s %s", 957 sprintf(ae->un.OsNameVersion, "%s %s %s",
937 init_utsname()->sysname, init_utsname()->release, 958 init_utsname()->sysname,
959 init_utsname()->release,
938 init_utsname()->version); 960 init_utsname()->version);
939 len = strlen(ae->un.OsNameVersion); 961 len = strlen(ae->un.OsNameVersion);
940 len += (len & 3) ? (4 - (len & 3)) : 4; 962 len += (len & 3) ? (4 - (len & 3)) : 4;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 71864cdc6c71..a5f33a0dd4e7 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -243,6 +243,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
243 struct serv_parm *sp, IOCB_t *irsp) 243 struct serv_parm *sp, IOCB_t *irsp)
244{ 244{
245 LPFC_MBOXQ_t *mbox; 245 LPFC_MBOXQ_t *mbox;
246 struct lpfc_dmabuf *mp;
246 int rc; 247 int rc;
247 248
248 spin_lock_irq(phba->host->host_lock); 249 spin_lock_irq(phba->host->host_lock);
@@ -307,10 +308,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
307 308
308 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 309 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
309 if (rc == MBX_NOT_FINISHED) 310 if (rc == MBX_NOT_FINISHED)
310 goto fail_free_mbox; 311 goto fail_issue_reg_login;
311 312
312 return 0; 313 return 0;
313 314
315 fail_issue_reg_login:
316 mp = (struct lpfc_dmabuf *) mbox->context1;
317 lpfc_mbuf_free(phba, mp->virt, mp->phys);
318 kfree(mp);
314 fail_free_mbox: 319 fail_free_mbox:
315 mempool_free(mbox, phba->mbox_mem_pool); 320 mempool_free(mbox, phba->mbox_mem_pool);
316 fail: 321 fail:
@@ -657,6 +662,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
657 uint8_t name[sizeof (struct lpfc_name)]; 662 uint8_t name[sizeof (struct lpfc_name)];
658 uint32_t rc; 663 uint32_t rc;
659 664
665 /* Fabric nodes can have the same WWPN so we don't bother searching
666 * by WWPN. Just return the ndlp that was given to us.
667 */
668 if (ndlp->nlp_type & NLP_FABRIC)
669 return ndlp;
670
660 lp = (uint32_t *) prsp->virt; 671 lp = (uint32_t *) prsp->virt;
661 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 672 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
662 memset(name, 0, sizeof (struct lpfc_name)); 673 memset(name, 0, sizeof (struct lpfc_name));
@@ -1122,7 +1133,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1122 mempool_free(mbox, 1133 mempool_free(mbox,
1123 phba->mbox_mem_pool); 1134 phba->mbox_mem_pool);
1124 lpfc_disc_flush_list(phba); 1135 lpfc_disc_flush_list(phba);
1125 psli->ring[(psli->ip_ring)]. 1136 psli->ring[(psli->extra_ring)].
1126 flag &= 1137 flag &=
1127 ~LPFC_STOP_IOCB_EVENT; 1138 ~LPFC_STOP_IOCB_EVENT;
1128 psli->ring[(psli->fcp_ring)]. 1139 psli->ring[(psli->fcp_ring)].
@@ -1851,6 +1862,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1851 IOCB_t *irsp; 1862 IOCB_t *irsp;
1852 struct lpfc_nodelist *ndlp; 1863 struct lpfc_nodelist *ndlp;
1853 LPFC_MBOXQ_t *mbox = NULL; 1864 LPFC_MBOXQ_t *mbox = NULL;
1865 struct lpfc_dmabuf *mp;
1854 1866
1855 irsp = &rspiocb->iocb; 1867 irsp = &rspiocb->iocb;
1856 1868
@@ -1862,6 +1874,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1862 /* Check to see if link went down during discovery */ 1874 /* Check to see if link went down during discovery */
1863 if ((lpfc_els_chk_latt(phba)) || !ndlp) { 1875 if ((lpfc_els_chk_latt(phba)) || !ndlp) {
1864 if (mbox) { 1876 if (mbox) {
1877 mp = (struct lpfc_dmabuf *) mbox->context1;
1878 if (mp) {
1879 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1880 kfree(mp);
1881 }
1865 mempool_free( mbox, phba->mbox_mem_pool); 1882 mempool_free( mbox, phba->mbox_mem_pool);
1866 } 1883 }
1867 goto out; 1884 goto out;
@@ -1893,9 +1910,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1893 } 1910 }
1894 /* NOTE: we should have messages for unsuccessful 1911 /* NOTE: we should have messages for unsuccessful
1895 reglogin */ 1912 reglogin */
1896 mempool_free( mbox, phba->mbox_mem_pool);
1897 } else { 1913 } else {
1898 mempool_free( mbox, phba->mbox_mem_pool);
1899 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ 1914 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
1900 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1915 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1901 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || 1916 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
@@ -1907,6 +1922,12 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1907 } 1922 }
1908 } 1923 }
1909 } 1924 }
1925 mp = (struct lpfc_dmabuf *) mbox->context1;
1926 if (mp) {
1927 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1928 kfree(mp);
1929 }
1930 mempool_free(mbox, phba->mbox_mem_pool);
1910 } 1931 }
1911out: 1932out:
1912 if (ndlp) { 1933 if (ndlp) {
@@ -2644,6 +2665,7 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba)
2644 ndlp->nlp_type |= NLP_FABRIC; 2665 ndlp->nlp_type |= NLP_FABRIC;
2645 ndlp->nlp_prev_state = ndlp->nlp_state; 2666 ndlp->nlp_prev_state = ndlp->nlp_state;
2646 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 2667 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2668 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
2647 lpfc_issue_els_plogi(phba, NameServer_DID, 0); 2669 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
2648 /* Wait for NameServer login cmpl before we can 2670 /* Wait for NameServer login cmpl before we can
2649 continue */ 2671 continue */
@@ -3039,7 +3061,7 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
3039 /* FARP-REQ received from DID <did> */ 3061 /* FARP-REQ received from DID <did> */
3040 lpfc_printf_log(phba, 3062 lpfc_printf_log(phba,
3041 KERN_INFO, 3063 KERN_INFO,
3042 LOG_IP, 3064 LOG_ELS,
3043 "%d:0601 FARP-REQ received from DID x%x\n", 3065 "%d:0601 FARP-REQ received from DID x%x\n",
3044 phba->brd_no, did); 3066 phba->brd_no, did);
3045 3067
@@ -3101,7 +3123,7 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
3101 /* FARP-RSP received from DID <did> */ 3123 /* FARP-RSP received from DID <did> */
3102 lpfc_printf_log(phba, 3124 lpfc_printf_log(phba,
3103 KERN_INFO, 3125 KERN_INFO,
3104 LOG_IP, 3126 LOG_ELS,
3105 "%d:0600 FARP-RSP received from DID x%x\n", 3127 "%d:0600 FARP-RSP received from DID x%x\n",
3106 phba->brd_no, did); 3128 phba->brd_no, did);
3107 3129
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 19c79a0549a7..c39564e85e94 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,7 +525,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
525 psli = &phba->sli; 525 psli = &phba->sli;
526 mb = &pmb->mb; 526 mb = &pmb->mb;
527 /* Since we don't do discovery right now, turn these off here */ 527 /* Since we don't do discovery right now, turn these off here */
528 psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 528 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
529 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 529 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
530 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 530 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
531 531
@@ -641,7 +641,7 @@ out:
641 if (rc == MBX_NOT_FINISHED) { 641 if (rc == MBX_NOT_FINISHED) {
642 mempool_free(pmb, phba->mbox_mem_pool); 642 mempool_free(pmb, phba->mbox_mem_pool);
643 lpfc_disc_flush_list(phba); 643 lpfc_disc_flush_list(phba);
644 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 644 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
645 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 645 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
646 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 646 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
647 phba->hba_state = LPFC_HBA_READY; 647 phba->hba_state = LPFC_HBA_READY;
@@ -672,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
672 672
673 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, 673 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
674 sizeof (struct serv_parm)); 674 sizeof (struct serv_parm));
675 if (phba->cfg_soft_wwnn)
676 u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
675 if (phba->cfg_soft_wwpn) 677 if (phba->cfg_soft_wwpn)
676 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); 678 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
677 memcpy((uint8_t *) & phba->fc_nodename, 679 memcpy((uint8_t *) & phba->fc_nodename,
@@ -696,7 +698,7 @@ out:
696 == MBX_NOT_FINISHED) { 698 == MBX_NOT_FINISHED) {
697 mempool_free( pmb, phba->mbox_mem_pool); 699 mempool_free( pmb, phba->mbox_mem_pool);
698 lpfc_disc_flush_list(phba); 700 lpfc_disc_flush_list(phba);
699 psli->ring[(psli->ip_ring)].flag &= 701 psli->ring[(psli->extra_ring)].flag &=
700 ~LPFC_STOP_IOCB_EVENT; 702 ~LPFC_STOP_IOCB_EVENT;
701 psli->ring[(psli->fcp_ring)].flag &= 703 psli->ring[(psli->fcp_ring)].flag &=
702 ~LPFC_STOP_IOCB_EVENT; 704 ~LPFC_STOP_IOCB_EVENT;
@@ -715,6 +717,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
715{ 717{
716 int i; 718 int i;
717 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 719 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
720 struct lpfc_dmabuf *mp;
721 int rc;
722
718 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 723 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
719 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 724 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
720 725
@@ -793,16 +798,27 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
793 if (sparam_mbox) { 798 if (sparam_mbox) {
794 lpfc_read_sparam(phba, sparam_mbox); 799 lpfc_read_sparam(phba, sparam_mbox);
795 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 800 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
796 lpfc_sli_issue_mbox(phba, sparam_mbox, 801 rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
797 (MBX_NOWAIT | MBX_STOP_IOCB)); 802 (MBX_NOWAIT | MBX_STOP_IOCB));
803 if (rc == MBX_NOT_FINISHED) {
804 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
805 lpfc_mbuf_free(phba, mp->virt, mp->phys);
806 kfree(mp);
807 mempool_free(sparam_mbox, phba->mbox_mem_pool);
808 if (cfglink_mbox)
809 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
810 return;
811 }
798 } 812 }
799 813
800 if (cfglink_mbox) { 814 if (cfglink_mbox) {
801 phba->hba_state = LPFC_LOCAL_CFG_LINK; 815 phba->hba_state = LPFC_LOCAL_CFG_LINK;
802 lpfc_config_link(phba, cfglink_mbox); 816 lpfc_config_link(phba, cfglink_mbox);
803 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 817 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
804 lpfc_sli_issue_mbox(phba, cfglink_mbox, 818 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
805 (MBX_NOWAIT | MBX_STOP_IOCB)); 819 (MBX_NOWAIT | MBX_STOP_IOCB));
820 if (rc == MBX_NOT_FINISHED)
821 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
806 } 822 }
807} 823}
808 824
@@ -1067,6 +1083,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1067 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID); 1083 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
1068 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN); 1084 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
1069 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID); 1085 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
1086 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
1070 } 1087 }
1071 1088
1072 phba->fc_ns_retry = 0; 1089 phba->fc_ns_retry = 0;
@@ -1423,7 +1440,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1423 if (iocb->context1 == (uint8_t *) ndlp) 1440 if (iocb->context1 == (uint8_t *) ndlp)
1424 return 1; 1441 return 1;
1425 } 1442 }
1426 } else if (pring->ringno == psli->ip_ring) { 1443 } else if (pring->ringno == psli->extra_ring) {
1427 1444
1428 } else if (pring->ringno == psli->fcp_ring) { 1445 } else if (pring->ringno == psli->fcp_ring) {
1429 /* Skip match check if waiting to relogin to FCP target */ 1446 /* Skip match check if waiting to relogin to FCP target */
@@ -1680,112 +1697,38 @@ lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1680struct lpfc_nodelist * 1697struct lpfc_nodelist *
1681lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) 1698lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1682{ 1699{
1683 struct lpfc_nodelist *ndlp, *next_ndlp; 1700 struct lpfc_nodelist *ndlp;
1701 struct list_head *lists[]={&phba->fc_nlpunmap_list,
1702 &phba->fc_nlpmap_list,
1703 &phba->fc_plogi_list,
1704 &phba->fc_adisc_list,
1705 &phba->fc_reglogin_list,
1706 &phba->fc_prli_list,
1707 &phba->fc_npr_list,
1708 &phba->fc_unused_list};
1709 uint32_t search[]={NLP_SEARCH_UNMAPPED,
1710 NLP_SEARCH_MAPPED,
1711 NLP_SEARCH_PLOGI,
1712 NLP_SEARCH_ADISC,
1713 NLP_SEARCH_REGLOGIN,
1714 NLP_SEARCH_PRLI,
1715 NLP_SEARCH_NPR,
1716 NLP_SEARCH_UNUSED};
1717 int i;
1684 uint32_t data1; 1718 uint32_t data1;
1685 1719
1686 spin_lock_irq(phba->host->host_lock); 1720 spin_lock_irq(phba->host->host_lock);
1687 if (order & NLP_SEARCH_UNMAPPED) { 1721 for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
1688 list_for_each_entry_safe(ndlp, next_ndlp, 1722 if (!(order & search[i]))
1689 &phba->fc_nlpunmap_list, nlp_listp) { 1723 continue;
1690 if (lpfc_matchdid(phba, ndlp, did)) { 1724 list_for_each_entry(ndlp, lists[i], nlp_listp) {
1691 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1692 ((uint32_t) ndlp->nlp_xri << 16) |
1693 ((uint32_t) ndlp->nlp_type << 8) |
1694 ((uint32_t) ndlp->nlp_rpi & 0xff));
1695 /* FIND node DID unmapped */
1696 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1697 "%d:0929 FIND node DID unmapped"
1698 " Data: x%p x%x x%x x%x\n",
1699 phba->brd_no,
1700 ndlp, ndlp->nlp_DID,
1701 ndlp->nlp_flag, data1);
1702 spin_unlock_irq(phba->host->host_lock);
1703 return ndlp;
1704 }
1705 }
1706 }
1707
1708 if (order & NLP_SEARCH_MAPPED) {
1709 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1710 nlp_listp) {
1711 if (lpfc_matchdid(phba, ndlp, did)) {
1712
1713 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1714 ((uint32_t) ndlp->nlp_xri << 16) |
1715 ((uint32_t) ndlp->nlp_type << 8) |
1716 ((uint32_t) ndlp->nlp_rpi & 0xff));
1717 /* FIND node DID mapped */
1718 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1719 "%d:0930 FIND node DID mapped "
1720 "Data: x%p x%x x%x x%x\n",
1721 phba->brd_no,
1722 ndlp, ndlp->nlp_DID,
1723 ndlp->nlp_flag, data1);
1724 spin_unlock_irq(phba->host->host_lock);
1725 return ndlp;
1726 }
1727 }
1728 }
1729
1730 if (order & NLP_SEARCH_PLOGI) {
1731 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1732 nlp_listp) {
1733 if (lpfc_matchdid(phba, ndlp, did)) {
1734
1735 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1736 ((uint32_t) ndlp->nlp_xri << 16) |
1737 ((uint32_t) ndlp->nlp_type << 8) |
1738 ((uint32_t) ndlp->nlp_rpi & 0xff));
1739 /* LOG change to PLOGI */
1740 /* FIND node DID plogi */
1741 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1742 "%d:0908 FIND node DID plogi "
1743 "Data: x%p x%x x%x x%x\n",
1744 phba->brd_no,
1745 ndlp, ndlp->nlp_DID,
1746 ndlp->nlp_flag, data1);
1747 spin_unlock_irq(phba->host->host_lock);
1748 return ndlp;
1749 }
1750 }
1751 }
1752
1753 if (order & NLP_SEARCH_ADISC) {
1754 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1755 nlp_listp) {
1756 if (lpfc_matchdid(phba, ndlp, did)) {
1757
1758 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1759 ((uint32_t) ndlp->nlp_xri << 16) |
1760 ((uint32_t) ndlp->nlp_type << 8) |
1761 ((uint32_t) ndlp->nlp_rpi & 0xff));
1762 /* LOG change to ADISC */
1763 /* FIND node DID adisc */
1764 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1765 "%d:0931 FIND node DID adisc "
1766 "Data: x%p x%x x%x x%x\n",
1767 phba->brd_no,
1768 ndlp, ndlp->nlp_DID,
1769 ndlp->nlp_flag, data1);
1770 spin_unlock_irq(phba->host->host_lock);
1771 return ndlp;
1772 }
1773 }
1774 }
1775
1776 if (order & NLP_SEARCH_REGLOGIN) {
1777 list_for_each_entry_safe(ndlp, next_ndlp,
1778 &phba->fc_reglogin_list, nlp_listp) {
1779 if (lpfc_matchdid(phba, ndlp, did)) { 1725 if (lpfc_matchdid(phba, ndlp, did)) {
1780
1781 data1 = (((uint32_t) ndlp->nlp_state << 24) | 1726 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1782 ((uint32_t) ndlp->nlp_xri << 16) | 1727 ((uint32_t) ndlp->nlp_xri << 16) |
1783 ((uint32_t) ndlp->nlp_type << 8) | 1728 ((uint32_t) ndlp->nlp_type << 8) |
1784 ((uint32_t) ndlp->nlp_rpi & 0xff)); 1729 ((uint32_t) ndlp->nlp_rpi & 0xff));
1785 /* LOG change to REGLOGIN */
1786 /* FIND node DID reglogin */
1787 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1730 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1788 "%d:0901 FIND node DID reglogin" 1731 "%d:0929 FIND node DID "
1789 " Data: x%p x%x x%x x%x\n", 1732 " Data: x%p x%x x%x x%x\n",
1790 phba->brd_no, 1733 phba->brd_no,
1791 ndlp, ndlp->nlp_DID, 1734 ndlp, ndlp->nlp_DID,
@@ -1795,86 +1738,12 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1795 } 1738 }
1796 } 1739 }
1797 } 1740 }
1798
1799 if (order & NLP_SEARCH_PRLI) {
1800 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1801 nlp_listp) {
1802 if (lpfc_matchdid(phba, ndlp, did)) {
1803
1804 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1805 ((uint32_t) ndlp->nlp_xri << 16) |
1806 ((uint32_t) ndlp->nlp_type << 8) |
1807 ((uint32_t) ndlp->nlp_rpi & 0xff));
1808 /* LOG change to PRLI */
1809 /* FIND node DID prli */
1810 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1811 "%d:0902 FIND node DID prli "
1812 "Data: x%p x%x x%x x%x\n",
1813 phba->brd_no,
1814 ndlp, ndlp->nlp_DID,
1815 ndlp->nlp_flag, data1);
1816 spin_unlock_irq(phba->host->host_lock);
1817 return ndlp;
1818 }
1819 }
1820 }
1821
1822 if (order & NLP_SEARCH_NPR) {
1823 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1824 nlp_listp) {
1825 if (lpfc_matchdid(phba, ndlp, did)) {
1826
1827 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1828 ((uint32_t) ndlp->nlp_xri << 16) |
1829 ((uint32_t) ndlp->nlp_type << 8) |
1830 ((uint32_t) ndlp->nlp_rpi & 0xff));
1831 /* LOG change to NPR */
1832 /* FIND node DID npr */
1833 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1834 "%d:0903 FIND node DID npr "
1835 "Data: x%p x%x x%x x%x\n",
1836 phba->brd_no,
1837 ndlp, ndlp->nlp_DID,
1838 ndlp->nlp_flag, data1);
1839 spin_unlock_irq(phba->host->host_lock);
1840 return ndlp;
1841 }
1842 }
1843 }
1844
1845 if (order & NLP_SEARCH_UNUSED) {
1846 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1847 nlp_listp) {
1848 if (lpfc_matchdid(phba, ndlp, did)) {
1849
1850 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1851 ((uint32_t) ndlp->nlp_xri << 16) |
1852 ((uint32_t) ndlp->nlp_type << 8) |
1853 ((uint32_t) ndlp->nlp_rpi & 0xff));
1854 /* LOG change to UNUSED */
1855 /* FIND node DID unused */
1856 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1857 "%d:0905 FIND node DID unused "
1858 "Data: x%p x%x x%x x%x\n",
1859 phba->brd_no,
1860 ndlp, ndlp->nlp_DID,
1861 ndlp->nlp_flag, data1);
1862 spin_unlock_irq(phba->host->host_lock);
1863 return ndlp;
1864 }
1865 }
1866 }
1867
1868 spin_unlock_irq(phba->host->host_lock); 1741 spin_unlock_irq(phba->host->host_lock);
1869 1742
1870 /* FIND node did <did> NOT FOUND */ 1743 /* FIND node did <did> NOT FOUND */
1871 lpfc_printf_log(phba, 1744 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1872 KERN_INFO,
1873 LOG_NODE,
1874 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n", 1745 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1875 phba->brd_no, did, order); 1746 phba->brd_no, did, order);
1876
1877 /* no match found */
1878 return NULL; 1747 return NULL;
1879} 1748}
1880 1749
@@ -2036,7 +1905,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
2036 if (rc == MBX_NOT_FINISHED) { 1905 if (rc == MBX_NOT_FINISHED) {
2037 mempool_free( mbox, phba->mbox_mem_pool); 1906 mempool_free( mbox, phba->mbox_mem_pool);
2038 lpfc_disc_flush_list(phba); 1907 lpfc_disc_flush_list(phba);
2039 psli->ring[(psli->ip_ring)].flag &= 1908 psli->ring[(psli->extra_ring)].flag &=
2040 ~LPFC_STOP_IOCB_EVENT; 1909 ~LPFC_STOP_IOCB_EVENT;
2041 psli->ring[(psli->fcp_ring)].flag &= 1910 psli->ring[(psli->fcp_ring)].flag &=
2042 ~LPFC_STOP_IOCB_EVENT; 1911 ~LPFC_STOP_IOCB_EVENT;
@@ -2415,7 +2284,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2415 2284
2416 if (clrlaerr) { 2285 if (clrlaerr) {
2417 lpfc_disc_flush_list(phba); 2286 lpfc_disc_flush_list(phba);
2418 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2287 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2419 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2288 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2420 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2289 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2421 phba->hba_state = LPFC_HBA_READY; 2290 phba->hba_state = LPFC_HBA_READY;
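
The lpfc_findnode_did() rework in the hunks above collapses eight nearly identical list walks into one loop over parallel arrays pairing each node list with its NLP_SEARCH_* flag bit. The following is a generic, self-contained C sketch of that pattern; the types and names are invented for illustration, and the driver's version additionally holds the host lock and logs each match.

    /* Table-driven lookup in the style of the reworked lpfc_findnode_did().
     * All identifiers here are illustrative, not the driver's. */
    #include <stddef.h>
    #include <stdio.h>

    struct node { int id; struct node *next; };

    #define SEARCH_MAPPED   0x1
    #define SEARCH_UNMAPPED 0x2

    static struct node *find_node(struct node *lists[], const unsigned int flags[],
                                  size_t nlists, unsigned int order, int id)
    {
        size_t i;
        struct node *n;

        for (i = 0; i < nlists; i++) {
            if (!(order & flags[i]))       /* caller did not request this list */
                continue;
            for (n = lists[i]; n; n = n->next)
                if (n->id == id)
                    return n;              /* first match wins */
        }
        return NULL;                       /* not found on any requested list */
    }

    int main(void)
    {
        struct node mapped = { 7, NULL }, unmapped = { 9, NULL };
        struct node *lists[] = { &mapped, &unmapped };
        const unsigned int flags[] = { SEARCH_MAPPED, SEARCH_UNMAPPED };
        struct node *hit = find_node(lists, flags, 2,
                                     SEARCH_MAPPED | SEARCH_UNMAPPED, 9);

        printf("%s\n", hit ? "found" : "not found");
        return 0;
    }
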
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index eedf98801366..f79cb6136906 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -42,14 +42,14 @@
42#define FCELSSIZE 1024 /* maximum ELS transfer size */ 42#define FCELSSIZE 1024 /* maximum ELS transfer size */
43 43
44#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */ 44#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
45#define LPFC_IP_RING 1 /* ring 1 for IP commands */ 45#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
46#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ 46#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
47#define LPFC_FCP_NEXT_RING 3 47#define LPFC_FCP_NEXT_RING 3
48 48
49#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ 49#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
50#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ 50#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
51#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 IP command ring entries */ 51#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 extra command ring entries */
52#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 IP response ring entries */ 52#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 extra response ring entries */
53#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */ 53#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
54#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */ 54#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
55#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */ 55#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
@@ -121,6 +121,20 @@ struct lpfc_sli_ct_request {
121 121
122 uint32_t rsvd[7]; 122 uint32_t rsvd[7];
123 } rft; 123 } rft;
124 struct rff {
125 uint32_t PortId;
126 uint8_t reserved[2];
127#ifdef __BIG_ENDIAN_BITFIELD
128 uint8_t feature_res:6;
129 uint8_t feature_init:1;
130 uint8_t feature_tgt:1;
131#else /* __LITTLE_ENDIAN_BITFIELD */
132 uint8_t feature_tgt:1;
133 uint8_t feature_init:1;
134 uint8_t feature_res:6;
135#endif
136 uint8_t type_code; /* type=8 for FCP */
137 } rff;
124 struct rnn { 138 struct rnn {
125 uint32_t PortId; /* For RNN_ID requests */ 139 uint32_t PortId; /* For RNN_ID requests */
126 uint8_t wwnn[8]; 140 uint8_t wwnn[8];
@@ -136,6 +150,7 @@ struct lpfc_sli_ct_request {
136#define SLI_CT_REVISION 1 150#define SLI_CT_REVISION 1
137#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260) 151#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
138#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228) 152#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
153#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235)
139#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252) 154#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
140#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request)) 155#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
141 156
@@ -225,6 +240,7 @@ struct lpfc_sli_ct_request {
225#define SLI_CTNS_RNN_ID 0x0213 240#define SLI_CTNS_RNN_ID 0x0213
226#define SLI_CTNS_RCS_ID 0x0214 241#define SLI_CTNS_RCS_ID 0x0214
227#define SLI_CTNS_RFT_ID 0x0217 242#define SLI_CTNS_RFT_ID 0x0217
243#define SLI_CTNS_RFF_ID 0x021F
228#define SLI_CTNS_RSPN_ID 0x0218 244#define SLI_CTNS_RSPN_ID 0x0218
229#define SLI_CTNS_RPT_ID 0x021A 245#define SLI_CTNS_RPT_ID 0x021A
230#define SLI_CTNS_RIP_NN 0x0235 246#define SLI_CTNS_RIP_NN 0x0235
@@ -1089,12 +1105,6 @@ typedef struct {
1089#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1105#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1090#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1106#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1091 1107
1092#define PCI_SUBSYSTEM_ID_LP11000S 0xfc11
1093#define PCI_SUBSYSTEM_ID_LP11002S 0xfc12
1094#define PCI_SUBSYSTEM_ID_LPE11000S 0xfc21
1095#define PCI_SUBSYSTEM_ID_LPE11002S 0xfc22
1096#define PCI_SUBSYSTEM_ID_LPE11010S 0xfc2A
1097
1098#define JEDEC_ID_ADDRESS 0x0080001c 1108#define JEDEC_ID_ADDRESS 0x0080001c
1099#define FIREFLY_JEDEC_ID 0x1ACC 1109#define FIREFLY_JEDEC_ID 0x1ACC
1100#define SUPERFLY_JEDEC_ID 0x0020 1110#define SUPERFLY_JEDEC_ID 0x0020
@@ -1284,6 +1294,10 @@ typedef struct { /* FireFly BIU registers */
1284#define CMD_FCP_IREAD_CX 0x1B 1294#define CMD_FCP_IREAD_CX 0x1B
1285#define CMD_FCP_ICMND_CR 0x1C 1295#define CMD_FCP_ICMND_CR 0x1C
1286#define CMD_FCP_ICMND_CX 0x1D 1296#define CMD_FCP_ICMND_CX 0x1D
1297#define CMD_FCP_TSEND_CX 0x1F
1298#define CMD_FCP_TRECEIVE_CX 0x21
1299#define CMD_FCP_TRSP_CX 0x23
1300#define CMD_FCP_AUTO_TRSP_CX 0x29
1287 1301
1288#define CMD_ADAPTER_MSG 0x20 1302#define CMD_ADAPTER_MSG 0x20
1289#define CMD_ADAPTER_DUMP 0x22 1303#define CMD_ADAPTER_DUMP 0x22
@@ -1310,6 +1324,9 @@ typedef struct { /* FireFly BIU registers */
1310#define CMD_FCP_IREAD64_CX 0x9B 1324#define CMD_FCP_IREAD64_CX 0x9B
1311#define CMD_FCP_ICMND64_CR 0x9C 1325#define CMD_FCP_ICMND64_CR 0x9C
1312#define CMD_FCP_ICMND64_CX 0x9D 1326#define CMD_FCP_ICMND64_CX 0x9D
1327#define CMD_FCP_TSEND64_CX 0x9F
1328#define CMD_FCP_TRECEIVE64_CX 0xA1
1329#define CMD_FCP_TRSP64_CX 0xA3
1313 1330
1314#define CMD_GEN_REQUEST64_CR 0xC2 1331#define CMD_GEN_REQUEST64_CR 0xC2
1315#define CMD_GEN_REQUEST64_CX 0xC3 1332#define CMD_GEN_REQUEST64_CX 0xC3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a5723ad0a099..afca45cdbcef 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
268 kfree(mp); 268 kfree(mp);
269 pmb->context1 = NULL; 269 pmb->context1 = NULL;
270 270
271 if (phba->cfg_soft_wwnn)
272 u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
271 if (phba->cfg_soft_wwpn) 273 if (phba->cfg_soft_wwpn)
272 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); 274 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
273 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, 275 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
@@ -349,8 +351,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
349 phba->hba_state = LPFC_LINK_DOWN; 351 phba->hba_state = LPFC_LINK_DOWN;
350 352
351 /* Only process IOCBs on ring 0 till hba_state is READY */ 353 /* Only process IOCBs on ring 0 till hba_state is READY */
352 if (psli->ring[psli->ip_ring].cmdringaddr) 354 if (psli->ring[psli->extra_ring].cmdringaddr)
353 psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT; 355 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
354 if (psli->ring[psli->fcp_ring].cmdringaddr) 356 if (psli->ring[psli->fcp_ring].cmdringaddr)
355 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 357 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
356 if (psli->ring[psli->next_ring].cmdringaddr) 358 if (psli->ring[psli->next_ring].cmdringaddr)
@@ -517,7 +519,8 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
517 struct lpfc_sli_ring *pring; 519 struct lpfc_sli_ring *pring;
518 uint32_t event_data; 520 uint32_t event_data;
519 521
520 if (phba->work_hs & HS_FFER6) { 522 if (phba->work_hs & HS_FFER6 ||
523 phba->work_hs & HS_FFER5) {
521 /* Re-establishing Link */ 524 /* Re-establishing Link */
522 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 525 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
523 "%d:1301 Re-establishing Link " 526 "%d:1301 Re-establishing Link "
@@ -611,7 +614,7 @@ lpfc_handle_latt(struct lpfc_hba * phba)
611 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 614 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
612 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); 615 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
613 if (rc == MBX_NOT_FINISHED) 616 if (rc == MBX_NOT_FINISHED)
614 goto lpfc_handle_latt_free_mp; 617 goto lpfc_handle_latt_free_mbuf;
615 618
616 /* Clear Link Attention in HA REG */ 619 /* Clear Link Attention in HA REG */
617 spin_lock_irq(phba->host->host_lock); 620 spin_lock_irq(phba->host->host_lock);
@@ -621,6 +624,8 @@ lpfc_handle_latt(struct lpfc_hba * phba)
621 624
622 return; 625 return;
623 626
627lpfc_handle_latt_free_mbuf:
628 lpfc_mbuf_free(phba, mp->virt, mp->phys);
624lpfc_handle_latt_free_mp: 629lpfc_handle_latt_free_mp:
625 kfree(mp); 630 kfree(mp);
626lpfc_handle_latt_free_pmb: 631lpfc_handle_latt_free_pmb:
@@ -802,19 +807,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
802{ 807{
803 lpfc_vpd_t *vp; 808 lpfc_vpd_t *vp;
804 uint16_t dev_id = phba->pcidev->device; 809 uint16_t dev_id = phba->pcidev->device;
805 uint16_t dev_subid = phba->pcidev->subsystem_device;
806 uint8_t hdrtype;
807 int max_speed; 810 int max_speed;
808 char * ports;
809 struct { 811 struct {
810 char * name; 812 char * name;
811 int max_speed; 813 int max_speed;
812 char * ports;
813 char * bus; 814 char * bus;
814 } m = {"<Unknown>", 0, "", ""}; 815 } m = {"<Unknown>", 0, ""};
815 816
816 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
817 ports = (hdrtype == 0x80) ? "2-port " : "";
818 if (mdp && mdp[0] != '\0' 817 if (mdp && mdp[0] != '\0'
819 && descp && descp[0] != '\0') 818 && descp && descp[0] != '\0')
820 return; 819 return;
@@ -834,130 +833,93 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
834 833
835 switch (dev_id) { 834 switch (dev_id) {
836 case PCI_DEVICE_ID_FIREFLY: 835 case PCI_DEVICE_ID_FIREFLY:
837 m = (typeof(m)){"LP6000", max_speed, "", "PCI"}; 836 m = (typeof(m)){"LP6000", max_speed, "PCI"};
838 break; 837 break;
839 case PCI_DEVICE_ID_SUPERFLY: 838 case PCI_DEVICE_ID_SUPERFLY:
840 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 839 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
841 m = (typeof(m)){"LP7000", max_speed, "", "PCI"}; 840 m = (typeof(m)){"LP7000", max_speed, "PCI"};
842 else 841 else
843 m = (typeof(m)){"LP7000E", max_speed, "", "PCI"}; 842 m = (typeof(m)){"LP7000E", max_speed, "PCI"};
844 break; 843 break;
845 case PCI_DEVICE_ID_DRAGONFLY: 844 case PCI_DEVICE_ID_DRAGONFLY:
846 m = (typeof(m)){"LP8000", max_speed, "", "PCI"}; 845 m = (typeof(m)){"LP8000", max_speed, "PCI"};
847 break; 846 break;
848 case PCI_DEVICE_ID_CENTAUR: 847 case PCI_DEVICE_ID_CENTAUR:
849 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 848 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
850 m = (typeof(m)){"LP9002", max_speed, "", "PCI"}; 849 m = (typeof(m)){"LP9002", max_speed, "PCI"};
851 else 850 else
852 m = (typeof(m)){"LP9000", max_speed, "", "PCI"}; 851 m = (typeof(m)){"LP9000", max_speed, "PCI"};
853 break; 852 break;
854 case PCI_DEVICE_ID_RFLY: 853 case PCI_DEVICE_ID_RFLY:
855 m = (typeof(m)){"LP952", max_speed, "", "PCI"}; 854 m = (typeof(m)){"LP952", max_speed, "PCI"};
856 break; 855 break;
857 case PCI_DEVICE_ID_PEGASUS: 856 case PCI_DEVICE_ID_PEGASUS:
858 m = (typeof(m)){"LP9802", max_speed, "", "PCI-X"}; 857 m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
859 break; 858 break;
860 case PCI_DEVICE_ID_THOR: 859 case PCI_DEVICE_ID_THOR:
861 if (hdrtype == 0x80) 860 m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
862 m = (typeof(m)){"LP10000DC",
863 max_speed, ports, "PCI-X"};
864 else
865 m = (typeof(m)){"LP10000",
866 max_speed, ports, "PCI-X"};
867 break; 861 break;
868 case PCI_DEVICE_ID_VIPER: 862 case PCI_DEVICE_ID_VIPER:
869 m = (typeof(m)){"LPX1000", max_speed, "", "PCI-X"}; 863 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
870 break; 864 break;
871 case PCI_DEVICE_ID_PFLY: 865 case PCI_DEVICE_ID_PFLY:
872 m = (typeof(m)){"LP982", max_speed, "", "PCI-X"}; 866 m = (typeof(m)){"LP982", max_speed, "PCI-X"};
873 break; 867 break;
874 case PCI_DEVICE_ID_TFLY: 868 case PCI_DEVICE_ID_TFLY:
875 if (hdrtype == 0x80) 869 m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
876 m = (typeof(m)){"LP1050DC", max_speed, ports, "PCI-X"};
877 else
878 m = (typeof(m)){"LP1050", max_speed, ports, "PCI-X"};
879 break; 870 break;
880 case PCI_DEVICE_ID_HELIOS: 871 case PCI_DEVICE_ID_HELIOS:
881 if (hdrtype == 0x80) 872 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
882 m = (typeof(m)){"LP11002", max_speed, ports, "PCI-X2"};
883 else
884 m = (typeof(m)){"LP11000", max_speed, ports, "PCI-X2"};
885 break; 873 break;
886 case PCI_DEVICE_ID_HELIOS_SCSP: 874 case PCI_DEVICE_ID_HELIOS_SCSP:
887 m = (typeof(m)){"LP11000-SP", max_speed, ports, "PCI-X2"}; 875 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
888 break; 876 break;
889 case PCI_DEVICE_ID_HELIOS_DCSP: 877 case PCI_DEVICE_ID_HELIOS_DCSP:
890 m = (typeof(m)){"LP11002-SP", max_speed, ports, "PCI-X2"}; 878 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
891 break; 879 break;
892 case PCI_DEVICE_ID_NEPTUNE: 880 case PCI_DEVICE_ID_NEPTUNE:
893 if (hdrtype == 0x80) 881 m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
894 m = (typeof(m)){"LPe1002", max_speed, ports, "PCIe"};
895 else
896 m = (typeof(m)){"LPe1000", max_speed, ports, "PCIe"};
897 break; 882 break;
898 case PCI_DEVICE_ID_NEPTUNE_SCSP: 883 case PCI_DEVICE_ID_NEPTUNE_SCSP:
899 m = (typeof(m)){"LPe1000-SP", max_speed, ports, "PCIe"}; 884 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
900 break; 885 break;
901 case PCI_DEVICE_ID_NEPTUNE_DCSP: 886 case PCI_DEVICE_ID_NEPTUNE_DCSP:
902 m = (typeof(m)){"LPe1002-SP", max_speed, ports, "PCIe"}; 887 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
903 break; 888 break;
904 case PCI_DEVICE_ID_BMID: 889 case PCI_DEVICE_ID_BMID:
905 m = (typeof(m)){"LP1150", max_speed, ports, "PCI-X2"}; 890 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
906 break; 891 break;
907 case PCI_DEVICE_ID_BSMB: 892 case PCI_DEVICE_ID_BSMB:
908 m = (typeof(m)){"LP111", max_speed, ports, "PCI-X2"}; 893 m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
909 break; 894 break;
910 case PCI_DEVICE_ID_ZEPHYR: 895 case PCI_DEVICE_ID_ZEPHYR:
911 if (hdrtype == 0x80) 896 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
912 m = (typeof(m)){"LPe11002", max_speed, ports, "PCIe"};
913 else
914 m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
915 break; 897 break;
916 case PCI_DEVICE_ID_ZEPHYR_SCSP: 898 case PCI_DEVICE_ID_ZEPHYR_SCSP:
917 m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"}; 899 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
918 break; 900 break;
919 case PCI_DEVICE_ID_ZEPHYR_DCSP: 901 case PCI_DEVICE_ID_ZEPHYR_DCSP:
920 m = (typeof(m)){"LPe11002-SP", max_speed, ports, "PCIe"}; 902 m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
921 break; 903 break;
922 case PCI_DEVICE_ID_ZMID: 904 case PCI_DEVICE_ID_ZMID:
923 m = (typeof(m)){"LPe1150", max_speed, ports, "PCIe"}; 905 m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
924 break; 906 break;
925 case PCI_DEVICE_ID_ZSMB: 907 case PCI_DEVICE_ID_ZSMB:
926 m = (typeof(m)){"LPe111", max_speed, ports, "PCIe"}; 908 m = (typeof(m)){"LPe111", max_speed, "PCIe"};
927 break; 909 break;
928 case PCI_DEVICE_ID_LP101: 910 case PCI_DEVICE_ID_LP101:
929 m = (typeof(m)){"LP101", max_speed, ports, "PCI-X"}; 911 m = (typeof(m)){"LP101", max_speed, "PCI-X"};
930 break; 912 break;
931 case PCI_DEVICE_ID_LP10000S: 913 case PCI_DEVICE_ID_LP10000S:
932 m = (typeof(m)){"LP10000-S", max_speed, ports, "PCI"}; 914 m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
933 break; 915 break;
934 case PCI_DEVICE_ID_LP11000S: 916 case PCI_DEVICE_ID_LP11000S:
917 m = (typeof(m)){"LP11000-S", max_speed,
918 "PCI-X2"};
919 break;
935 case PCI_DEVICE_ID_LPE11000S: 920 case PCI_DEVICE_ID_LPE11000S:
936 switch (dev_subid) { 921 m = (typeof(m)){"LPe11000-S", max_speed,
937 case PCI_SUBSYSTEM_ID_LP11000S: 922 "PCIe"};
938 m = (typeof(m)){"LP11000-S", max_speed,
939 ports, "PCI-X2"};
940 break;
941 case PCI_SUBSYSTEM_ID_LP11002S:
942 m = (typeof(m)){"LP11002-S", max_speed,
943 ports, "PCI-X2"};
944 break;
945 case PCI_SUBSYSTEM_ID_LPE11000S:
946 m = (typeof(m)){"LPe11000-S", max_speed,
947 ports, "PCIe"};
948 break;
949 case PCI_SUBSYSTEM_ID_LPE11002S:
950 m = (typeof(m)){"LPe11002-S", max_speed,
951 ports, "PCIe"};
952 break;
953 case PCI_SUBSYSTEM_ID_LPE11010S:
954 m = (typeof(m)){"LPe11010-S", max_speed,
955 "10-port ", "PCIe"};
956 break;
957 default:
958 m = (typeof(m)){ NULL };
959 break;
960 }
961 break; 923 break;
962 default: 924 default:
963 m = (typeof(m)){ NULL }; 925 m = (typeof(m)){ NULL };
@@ -968,8 +930,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
968 snprintf(mdp, 79,"%s", m.name); 930 snprintf(mdp, 79,"%s", m.name);
969 if (descp && descp[0] == '\0') 931 if (descp && descp[0] == '\0')
970 snprintf(descp, 255, 932 snprintf(descp, 255,
971 "Emulex %s %dGb %s%s Fibre Channel Adapter", 933 "Emulex %s %dGb %s Fibre Channel Adapter",
972 m.name, m.max_speed, m.ports, m.bus); 934 m.name, m.max_speed, m.bus);
973} 935}
974 936
975/**************************************************/ 937/**************************************************/
@@ -1651,6 +1613,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1651 if (error) 1613 if (error)
1652 goto out_remove_host; 1614 goto out_remove_host;
1653 1615
1616 if (phba->cfg_use_msi) {
1617 error = pci_enable_msi(phba->pcidev);
1618 if (error)
1619 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
1620 "Enable MSI failed, continuing with "
1621 "IRQ\n", phba->brd_no);
1622 }
1623
1654 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, 1624 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
1655 LPFC_DRIVER_NAME, phba); 1625 LPFC_DRIVER_NAME, phba);
1656 if (error) { 1626 if (error) {
@@ -1730,6 +1700,7 @@ out_free_irq:
1730 lpfc_stop_timer(phba); 1700 lpfc_stop_timer(phba);
1731 phba->work_hba_events = 0; 1701 phba->work_hba_events = 0;
1732 free_irq(phba->pcidev->irq, phba); 1702 free_irq(phba->pcidev->irq, phba);
1703 pci_disable_msi(phba->pcidev);
1733out_free_sysfs_attr: 1704out_free_sysfs_attr:
1734 lpfc_free_sysfs_attr(phba); 1705 lpfc_free_sysfs_attr(phba);
1735out_remove_host: 1706out_remove_host:
@@ -1796,6 +1767,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
1796 1767
1797 /* Release the irq reservation */ 1768 /* Release the irq reservation */
1798 free_irq(phba->pcidev->irq, phba); 1769 free_irq(phba->pcidev->irq, phba);
1770 pci_disable_msi(phba->pcidev);
1799 1771
1800 lpfc_cleanup(phba, 0); 1772 lpfc_cleanup(phba, 0);
1801 lpfc_stop_timer(phba); 1773 lpfc_stop_timer(phba);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 62c8ca862e9e..438cbcd9eb13 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -28,7 +28,7 @@
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x80 /* Node table events */
29#define LOG_MISC 0x400 /* Miscellaneous events */ 29#define LOG_MISC 0x400 /* Miscellaneous events */
30#define LOG_SLI 0x800 /* SLI events */ 30#define LOG_SLI 0x800 /* SLI events */
31#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */ 31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
32#define LOG_LIBDFC 0x2000 /* Libdfc events */ 32#define LOG_LIBDFC 0x2000 /* Libdfc events */
33#define LOG_ALL_MSG 0xffff /* LOG all messages */ 33#define LOG_ALL_MSG 0xffff /* LOG all messages */
34 34
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d5f415007db2..0c7e731dc45a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -739,7 +739,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
739 uint32_t evt) 739 uint32_t evt)
740{ 740{
741 struct lpfc_iocbq *cmdiocb, *rspiocb; 741 struct lpfc_iocbq *cmdiocb, *rspiocb;
742 struct lpfc_dmabuf *pcmd, *prsp; 742 struct lpfc_dmabuf *pcmd, *prsp, *mp;
743 uint32_t *lp; 743 uint32_t *lp;
744 IOCB_t *irsp; 744 IOCB_t *irsp;
745 struct serv_parm *sp; 745 struct serv_parm *sp;
@@ -829,6 +829,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
829 NLP_REGLOGIN_LIST); 829 NLP_REGLOGIN_LIST);
830 return ndlp->nlp_state; 830 return ndlp->nlp_state;
831 } 831 }
832 mp = (struct lpfc_dmabuf *)mbox->context1;
833 lpfc_mbuf_free(phba, mp->virt, mp->phys);
834 kfree(mp);
832 mempool_free(mbox, phba->mbox_mem_pool); 835 mempool_free(mbox, phba->mbox_mem_pool);
833 } else { 836 } else {
834 mempool_free(mbox, phba->mbox_mem_pool); 837 mempool_free(mbox, phba->mbox_mem_pool);
@@ -1620,8 +1623,8 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1620 * or discovery in progress for this node. Starting discovery 1623 * or discovery in progress for this node. Starting discovery
1621 * here will affect the counting of discovery threads. 1624 * here will affect the counting of discovery threads.
1622 */ 1625 */
1623 if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && 1626 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1624 (ndlp->nlp_flag & NLP_NPR_2B_DISC)){ 1627 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
1625 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1628 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1626 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1629 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1627 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 1630 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 97ae98dc95d0..c3e68e0d8f74 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -297,8 +297,10 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
297 uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm; 297 uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
298 uint32_t resp_info = fcprsp->rspStatus2; 298 uint32_t resp_info = fcprsp->rspStatus2;
299 uint32_t scsi_status = fcprsp->rspStatus3; 299 uint32_t scsi_status = fcprsp->rspStatus3;
300 uint32_t *lp;
300 uint32_t host_status = DID_OK; 301 uint32_t host_status = DID_OK;
301 uint32_t rsplen = 0; 302 uint32_t rsplen = 0;
303 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
302 304
303 /* 305 /*
304 * If this is a task management command, there is no 306 * If this is a task management command, there is no
@@ -310,10 +312,25 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
310 goto out; 312 goto out;
311 } 313 }
312 314
313 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 315 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
314 "%d:0730 FCP command failed: RSP " 316 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
315 "Data: x%x x%x x%x x%x x%x x%x\n", 317 if (snslen > SCSI_SENSE_BUFFERSIZE)
316 phba->brd_no, resp_info, scsi_status, 318 snslen = SCSI_SENSE_BUFFERSIZE;
319
320 if (resp_info & RSP_LEN_VALID)
321 rsplen = be32_to_cpu(fcprsp->rspRspLen);
322 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
323 }
324 lp = (uint32_t *)cmnd->sense_buffer;
325
326 if (!scsi_status && (resp_info & RESID_UNDER))
327 logit = LOG_FCP;
328
329 lpfc_printf_log(phba, KERN_WARNING, logit,
330 "%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
331 "Data: x%x x%x x%x x%x x%x\n",
332 phba->brd_no, cmnd->cmnd[0], scsi_status,
333 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
317 be32_to_cpu(fcprsp->rspResId), 334 be32_to_cpu(fcprsp->rspResId),
318 be32_to_cpu(fcprsp->rspSnsLen), 335 be32_to_cpu(fcprsp->rspSnsLen),
319 be32_to_cpu(fcprsp->rspRspLen), 336 be32_to_cpu(fcprsp->rspRspLen),
@@ -328,14 +345,6 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
328 } 345 }
329 } 346 }
330 347
331 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
332 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
333 if (snslen > SCSI_SENSE_BUFFERSIZE)
334 snslen = SCSI_SENSE_BUFFERSIZE;
335
336 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
337 }
338
339 cmnd->resid = 0; 348 cmnd->resid = 0;
340 if (resp_info & RESID_UNDER) { 349 if (resp_info & RESID_UNDER) {
341 cmnd->resid = be32_to_cpu(fcprsp->rspResId); 350 cmnd->resid = be32_to_cpu(fcprsp->rspResId);
@@ -378,7 +387,7 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
378 */ 387 */
379 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 388 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
380 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 389 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
381 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 390 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
382 "%d:0734 FCP Read Check Error Data: " 391 "%d:0734 FCP Read Check Error Data: "
383 "x%x x%x x%x x%x\n", phba->brd_no, 392 "x%x x%x x%x x%x\n", phba->brd_no,
384 be32_to_cpu(fcpcmd->fcpDl), 393 be32_to_cpu(fcpcmd->fcpDl),
@@ -670,6 +679,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
670 struct lpfc_iocbq *iocbqrsp; 679 struct lpfc_iocbq *iocbqrsp;
671 int ret; 680 int ret;
672 681
682 if (!rdata->pnode)
683 return FAILED;
684
673 lpfc_cmd->rdata = rdata; 685 lpfc_cmd->rdata = rdata;
674 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, 686 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
675 FCP_TARGET_RESET); 687 FCP_TARGET_RESET);
@@ -976,20 +988,34 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
976 988
977 lpfc_block_error_handler(cmnd); 989 lpfc_block_error_handler(cmnd);
978 spin_lock_irq(shost->host_lock); 990 spin_lock_irq(shost->host_lock);
991 loopcnt = 0;
979 /* 992 /*
980 * If target is not in a MAPPED state, delay the reset until 993 * If target is not in a MAPPED state, delay the reset until
981 * target is rediscovered or devloss timeout expires. 994 * target is rediscovered or devloss timeout expires.
982 */ 995 */
983 while ( 1 ) { 996 while ( 1 ) {
984 if (!pnode) 997 if (!pnode)
985 break; 998 return FAILED;
986 999
987 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { 1000 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
988 spin_unlock_irq(phba->host->host_lock); 1001 spin_unlock_irq(phba->host->host_lock);
989 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1002 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
990 spin_lock_irq(phba->host->host_lock); 1003 spin_lock_irq(phba->host->host_lock);
1004 loopcnt++;
1005 rdata = cmnd->device->hostdata;
1006 if (!rdata ||
1007 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
1008 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1009 "%d:0721 LUN Reset rport failure:"
1010 " cnt x%x rdata x%p\n",
1011 phba->brd_no, loopcnt, rdata);
1012 goto out;
1013 }
1014 pnode = rdata->pnode;
1015 if (!pnode)
1016 return FAILED;
991 } 1017 }
992 if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE)) 1018 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
993 break; 1019 break;
994 } 1020 }
995 1021
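Aside: a minimal sketch, not the lpfc code itself, of the bounded wait the reworked LUN-reset path above relies on. Each pass sleeps 500 ms, so cfg_devloss_tmo seconds amount to roughly devloss_tmo * 2 passes, which is where the "(phba->cfg_devloss_tmo * 2) + 1" cap comes from (one extra pass of grace). The structure and helper names below are illustrative assumptions.

#include <linux/delay.h>
#include <linux/errno.h>

static int wait_for_mapped(struct my_target *tgt, unsigned int devloss_tmo)
{
	unsigned int loopcnt = 0;

	while (!target_is_mapped(tgt)) {		/* assumed helper         */
		msleep(500);				/* two passes per second  */
		if (++loopcnt > (devloss_tmo * 2) + 1)
			return -ETIMEDOUT;		/* rport never came back  */
	}
	return 0;
}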
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 582f5ea4e84e..a4128e19338a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -117,6 +117,10 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
117 case CMD_FCP_IREAD_CX: 117 case CMD_FCP_IREAD_CX:
118 case CMD_FCP_ICMND_CR: 118 case CMD_FCP_ICMND_CR:
119 case CMD_FCP_ICMND_CX: 119 case CMD_FCP_ICMND_CX:
120 case CMD_FCP_TSEND_CX:
121 case CMD_FCP_TRSP_CX:
122 case CMD_FCP_TRECEIVE_CX:
123 case CMD_FCP_AUTO_TRSP_CX:
120 case CMD_ADAPTER_MSG: 124 case CMD_ADAPTER_MSG:
121 case CMD_ADAPTER_DUMP: 125 case CMD_ADAPTER_DUMP:
122 case CMD_XMIT_SEQUENCE64_CR: 126 case CMD_XMIT_SEQUENCE64_CR:
@@ -131,6 +135,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
131 case CMD_FCP_IREAD64_CX: 135 case CMD_FCP_IREAD64_CX:
132 case CMD_FCP_ICMND64_CR: 136 case CMD_FCP_ICMND64_CR:
133 case CMD_FCP_ICMND64_CX: 137 case CMD_FCP_ICMND64_CX:
138 case CMD_FCP_TSEND64_CX:
139 case CMD_FCP_TRSP64_CX:
140 case CMD_FCP_TRECEIVE64_CX:
134 case CMD_GEN_REQUEST64_CR: 141 case CMD_GEN_REQUEST64_CR:
135 case CMD_GEN_REQUEST64_CX: 142 case CMD_GEN_REQUEST64_CX:
136 case CMD_XMIT_ELS_RSP64_CX: 143 case CMD_XMIT_ELS_RSP64_CX:
@@ -1098,6 +1105,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1098 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1105 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1099 (uint32_t *) &rspiocbq.iocb, 1106 (uint32_t *) &rspiocbq.iocb,
1100 sizeof (IOCB_t)); 1107 sizeof (IOCB_t));
1108 INIT_LIST_HEAD(&(rspiocbq.list));
1101 irsp = &rspiocbq.iocb; 1109 irsp = &rspiocbq.iocb;
1102 1110
1103 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1111 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
@@ -1149,6 +1157,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1149 } 1157 }
1150 } 1158 }
1151 break; 1159 break;
1160 case LPFC_UNSOL_IOCB:
1161 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1162 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1163 spin_lock_irqsave(phba->host->host_lock, iflag);
1164 break;
1152 default: 1165 default:
1153 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1166 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1154 char adaptermsg[LPFC_MAX_ADPTMSG]; 1167 char adaptermsg[LPFC_MAX_ADPTMSG];
@@ -2472,13 +2485,17 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
2472 psli = &phba->sli; 2485 psli = &phba->sli;
2473 2486
2474 /* Adjust cmd/rsp ring iocb entries more evenly */ 2487 /* Adjust cmd/rsp ring iocb entries more evenly */
2488
2489 /* Take some away from the FCP ring */
2475 pring = &psli->ring[psli->fcp_ring]; 2490 pring = &psli->ring[psli->fcp_ring];
2476 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2491 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2477 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2492 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2478 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2493 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2479 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2494 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2480 2495
2481 pring = &psli->ring[1]; 2496 /* and give them to the extra ring */
2497 pring = &psli->ring[psli->extra_ring];
2498
2482 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2499 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2483 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2500 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2484 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2501 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
@@ -2488,8 +2505,8 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
2488 pring->iotag_max = 4096; 2505 pring->iotag_max = 4096;
2489 pring->num_mask = 1; 2506 pring->num_mask = 1;
2490 pring->prt[0].profile = 0; /* Mask 0 */ 2507 pring->prt[0].profile = 0; /* Mask 0 */
2491 pring->prt[0].rctl = FC_UNSOL_DATA; 2508 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2492 pring->prt[0].type = 5; 2509 pring->prt[0].type = phba->cfg_multi_ring_type;
2493 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 2510 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2494 return 0; 2511 return 0;
2495} 2512}
@@ -2505,7 +2522,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2505 psli->sli_flag = 0; 2522 psli->sli_flag = 0;
2506 psli->fcp_ring = LPFC_FCP_RING; 2523 psli->fcp_ring = LPFC_FCP_RING;
2507 psli->next_ring = LPFC_FCP_NEXT_RING; 2524 psli->next_ring = LPFC_FCP_NEXT_RING;
2508 psli->ip_ring = LPFC_IP_RING; 2525 psli->extra_ring = LPFC_EXTRA_RING;
2509 2526
2510 psli->iocbq_lookup = NULL; 2527 psli->iocbq_lookup = NULL;
2511 psli->iocbq_lookup_len = 0; 2528 psli->iocbq_lookup_len = 0;
@@ -2528,7 +2545,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2528 pring->fast_iotag = pring->iotag_max; 2545 pring->fast_iotag = pring->iotag_max;
2529 pring->num_mask = 0; 2546 pring->num_mask = 0;
2530 break; 2547 break;
2531 case LPFC_IP_RING: /* ring 1 - IP */ 2548 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
2532 /* numCiocb and numRiocb are used in config_port */ 2549 /* numCiocb and numRiocb are used in config_port */
2533 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 2550 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2534 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 2551 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
@@ -3238,6 +3255,21 @@ lpfc_intr_handler(int irq, void *dev_id)
3238 lpfc_sli_handle_fast_ring_event(phba, 3255 lpfc_sli_handle_fast_ring_event(phba,
3239 &phba->sli.ring[LPFC_FCP_RING], 3256 &phba->sli.ring[LPFC_FCP_RING],
3240 status); 3257 status);
3258
3259 if (phba->cfg_multi_ring_support == 2) {
3260 /*
3261 * Process all events on extra ring. Take the optimized path
3262 * for extra ring IO. Any other IO is slow path and is handled
3263 * by the worker thread.
3264 */
3265 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
3266 status >>= (4*LPFC_EXTRA_RING);
3267 if (status & HA_RXATT) {
3268 lpfc_sli_handle_fast_ring_event(phba,
3269 &phba->sli.ring[LPFC_EXTRA_RING],
3270 status);
3271 }
3272 }
3241 return IRQ_HANDLED; 3273 return IRQ_HANDLED;
3242 3274
3243} /* lpfc_intr_handler */ 3275} /* lpfc_intr_handler */
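Aside: a hedged illustration of the extra-ring attention check added to lpfc_intr_handler above. Each SLI ring owns a 4-bit field in the host-attention word, so masking with HA_RXMASK shifted by 4*ring and shifting back isolates that ring's bits. The mask values below are illustrative stand-ins; only the shift arithmetic mirrors the diff.

#define RING_NIBBLE_MASK 0x0fU	/* illustrative stand-in for HA_RXMASK */
#define RING_RX_ATTN     0x08U	/* illustrative stand-in for HA_RXATT  */

/* Isolate ring N's 4-bit attention field and shift it down to bits 3..0. */
static unsigned int ring_attention(unsigned int ha_copy, int ring)
{
	return (ha_copy & (RING_NIBBLE_MASK << (4 * ring))) >> (4 * ring);
}
/* e.g. (ring_attention(ha_copy, 1) & RING_RX_ATTN) tests RX attention on the extra ring */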
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index e26de6809358..a43549959dc7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -198,7 +198,7 @@ struct lpfc_sli {
198 int fcp_ring; /* ring used for FCP initiator commands */ 198 int fcp_ring; /* ring used for FCP initiator commands */
199 int next_ring; 199 int next_ring;
200 200
201 int ip_ring; /* ring used for IP network drv cmds */ 201 int extra_ring; /* extra ring used for other protocols */
202 202
203 struct lpfc_sli_stat slistat; /* SLI statistical info */ 203 struct lpfc_sli_stat slistat; /* SLI statistical info */
204 struct list_head mboxq; 204 struct list_head mboxq;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac417908b407..a61ef3d1e7f1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.10" 21#define LPFC_DRIVER_VERSION "8.1.11"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 86099fde1b2a..77d9d3804ccf 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -73,10 +73,10 @@ static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
73module_param(max_mbox_busy_wait, ushort, 0); 73module_param(max_mbox_busy_wait, ushort, 0);
74MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); 74MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
75 75
76#define RDINDOOR(adapter) readl((adapter)->base + 0x20) 76#define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
77#define RDOUTDOOR(adapter) readl((adapter)->base + 0x2C) 77#define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
78#define WRINDOOR(adapter,value) writel(value, (adapter)->base + 0x20) 78#define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
79#define WROUTDOOR(adapter,value) writel(value, (adapter)->base + 0x2C) 79#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
80 80
81/* 81/*
82 * Global variables 82 * Global variables
@@ -1386,7 +1386,8 @@ megaraid_isr_memmapped(int irq, void *devp)
1386 1386
1387 handled = 1; 1387 handled = 1;
1388 1388
1389 while( RDINDOOR(adapter) & 0x02 ) cpu_relax(); 1389 while( RDINDOOR(adapter) & 0x02 )
1390 cpu_relax();
1390 1391
1391 mega_cmd_done(adapter, completed, nstatus, status); 1392 mega_cmd_done(adapter, completed, nstatus, status);
1392 1393
@@ -4668,6 +4669,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4668 host->host_no, mega_baseport, irq); 4669 host->host_no, mega_baseport, irq);
4669 4670
4670 adapter->base = mega_baseport; 4671 adapter->base = mega_baseport;
4672 if (flag & BOARD_MEMMAP)
4673 adapter->mmio_base = (void __iomem *) mega_baseport;
4671 4674
4672 INIT_LIST_HEAD(&adapter->free_list); 4675 INIT_LIST_HEAD(&adapter->free_list);
4673 INIT_LIST_HEAD(&adapter->pending_list); 4676 INIT_LIST_HEAD(&adapter->pending_list);
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 66529f11d23c..c6e74643abe2 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -801,7 +801,8 @@ typedef struct {
801 clustering is available */ 801 clustering is available */
802 u32 flag; 802 u32 flag;
803 803
804 unsigned long base; 804 unsigned long base;
805 void __iomem *mmio_base;
805 806
806 /* mbox64 with mbox not aligned on 16-byte boundry */ 807 /* mbox64 with mbox not aligned on 16-byte boundry */
807 mbox64_t *una_mbox64; 808 mbox64_t *una_mbox64;
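Aside: the mmio_base field added above lets the RDINDOOR/WRINDOOR/RDOUTDOOR/WROUTDOOR macros hand readl()/writel() a proper void __iomem * cookie instead of an unsigned long. A hedged sketch of that pattern, with an illustrative adapter structure and register offsets, not the megaraid code itself:

static int my_map_doorbells(struct pci_dev *pdev, struct my_adapter *adapter)
{
	/* ioremap() returns the __iomem cookie that readl()/writel() expect */
	adapter->mmio_base = ioremap(pci_resource_start(pdev, 0), 0x80);
	if (!adapter->mmio_base)
		return -ENOMEM;

	writel(0x1, adapter->mmio_base + 0x20);			/* cf. WRINDOOR  */
	pr_debug("outdoor %08x\n", readl(adapter->mmio_base + 0x2C)); /* cf. RDOUTDOOR */
	return 0;
}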
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 7e4262f2af96..046223b4ae57 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -517,7 +517,7 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
517 * Returns the number of frames required for numnber of sge's (sge_count) 517 * Returns the number of frames required for numnber of sge's (sge_count)
518 */ 518 */
519 519
520u32 megasas_get_frame_count(u8 sge_count) 520static u32 megasas_get_frame_count(u8 sge_count)
521{ 521{
522 int num_cnt; 522 int num_cnt;
523 int sge_bytes; 523 int sge_bytes;
@@ -1733,7 +1733,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
1733 * 1733 *
1734 * Tasklet to complete cmds 1734 * Tasklet to complete cmds
1735 */ 1735 */
1736void megasas_complete_cmd_dpc(unsigned long instance_addr) 1736static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1737{ 1737{
1738 u32 producer; 1738 u32 producer;
1739 u32 consumer; 1739 u32 consumer;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index adb8eb4f5fd1..bbf521cbc55d 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -589,10 +589,12 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
589static struct ncr_driver_setup 589static struct ncr_driver_setup
590 driver_setup = SCSI_NCR_DRIVER_SETUP; 590 driver_setup = SCSI_NCR_DRIVER_SETUP;
591 591
592#ifndef MODULE
592#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT 593#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
593static struct ncr_driver_setup 594static struct ncr_driver_setup
594 driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP; 595 driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
595#endif 596#endif
597#endif /* !MODULE */
596 598
597#define initverbose (driver_setup.verbose) 599#define initverbose (driver_setup.verbose)
598#define bootverbose (np->verbose) 600#define bootverbose (np->verbose)
@@ -641,6 +643,13 @@ static struct ncr_driver_setup
641#define OPT_IARB 26 643#define OPT_IARB 26
642#endif 644#endif
643 645
646#ifdef MODULE
647#define ARG_SEP ' '
648#else
649#define ARG_SEP ','
650#endif
651
652#ifndef MODULE
644static char setup_token[] __initdata = 653static char setup_token[] __initdata =
645 "tags:" "mpar:" 654 "tags:" "mpar:"
646 "spar:" "disc:" 655 "spar:" "disc:"
@@ -660,12 +669,6 @@ static char setup_token[] __initdata =
660#endif 669#endif
661 ; /* DONNOT REMOVE THIS ';' */ 670 ; /* DONNOT REMOVE THIS ';' */
662 671
663#ifdef MODULE
664#define ARG_SEP ' '
665#else
666#define ARG_SEP ','
667#endif
668
669static int __init get_setup_token(char *p) 672static int __init get_setup_token(char *p)
670{ 673{
671 char *cur = setup_token; 674 char *cur = setup_token;
@@ -682,7 +685,6 @@ static int __init get_setup_token(char *p)
682 return 0; 685 return 0;
683} 686}
684 687
685
686static int __init sym53c8xx__setup(char *str) 688static int __init sym53c8xx__setup(char *str)
687{ 689{
688#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT 690#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
@@ -804,6 +806,7 @@ static int __init sym53c8xx__setup(char *str)
804#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */ 806#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
805 return 1; 807 return 1;
806} 808}
809#endif /* !MODULE */
807 810
808/*=================================================================== 811/*===================================================================
809** 812**
@@ -8321,12 +8324,12 @@ char *ncr53c8xx; /* command line passed by insmod */
8321module_param(ncr53c8xx, charp, 0); 8324module_param(ncr53c8xx, charp, 0);
8322#endif 8325#endif
8323 8326
8327#ifndef MODULE
8324static int __init ncr53c8xx_setup(char *str) 8328static int __init ncr53c8xx_setup(char *str)
8325{ 8329{
8326 return sym53c8xx__setup(str); 8330 return sym53c8xx__setup(str);
8327} 8331}
8328 8332
8329#ifndef MODULE
8330__setup("ncr53c8xx=", ncr53c8xx_setup); 8333__setup("ncr53c8xx=", ncr53c8xx_setup);
8331#endif 8334#endif
8332 8335
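Aside: the ncr53c8xx hunks above gather the boot-command-line parsing under #ifndef MODULE, since __setup() handlers are only reachable when the driver is built in. A hedged sketch of that pattern, with an illustrative driver name:

#include <linux/init.h>

#ifndef MODULE
static int __init mydrv_setup(char *str)
{
	/* parse "mydrv=..." options from the kernel command line */
	return 1;	/* non-zero: option handled */
}
__setup("mydrv=", mydrv_setup);
#endif /* !MODULE */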
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index ee449b29fc82..aad362ba02e0 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -154,16 +154,11 @@ static int aha152x_config_cs(struct pcmcia_device *link)
154 154
155 DEBUG(0, "aha152x_config(0x%p)\n", link); 155 DEBUG(0, "aha152x_config(0x%p)\n", link);
156 156
157 tuple.DesiredTuple = CISTPL_CONFIG;
158 tuple.TupleData = tuple_data; 157 tuple.TupleData = tuple_data;
159 tuple.TupleDataMax = 64; 158 tuple.TupleDataMax = 64;
160 tuple.TupleOffset = 0; 159 tuple.TupleOffset = 0;
161 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
162 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
163 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
164 link->conf.ConfigBase = parse.config.base;
165
166 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 160 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
161 tuple.Attributes = 0;
167 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 162 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
168 while (1) { 163 while (1) {
169 if (pcmcia_get_tuple_data(link, &tuple) != 0 || 164 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 85f7ffac19a0..a1c5f265069f 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -136,14 +136,9 @@ static int fdomain_config(struct pcmcia_device *link)
136 136
137 DEBUG(0, "fdomain_config(0x%p)\n", link); 137 DEBUG(0, "fdomain_config(0x%p)\n", link);
138 138
139 tuple.DesiredTuple = CISTPL_CONFIG;
140 tuple.TupleData = tuple_data; 139 tuple.TupleData = tuple_data;
141 tuple.TupleDataMax = 64; 140 tuple.TupleDataMax = 64;
142 tuple.TupleOffset = 0; 141 tuple.TupleOffset = 0;
143 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
144 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
145 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
146 link->conf.ConfigBase = parse.config.base;
147 142
148 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 143 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
149 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 144 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index f2d79c3f0b8e..d72df5dae4ee 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1685,16 +1685,10 @@ static int nsp_cs_config(struct pcmcia_device *link)
1685 1685
1686 nsp_dbg(NSP_DEBUG_INIT, "in"); 1686 nsp_dbg(NSP_DEBUG_INIT, "in");
1687 1687
1688 tuple.DesiredTuple = CISTPL_CONFIG;
1689 tuple.Attributes = 0; 1688 tuple.Attributes = 0;
1690 tuple.TupleData = tuple_data; 1689 tuple.TupleData = tuple_data;
1691 tuple.TupleDataMax = sizeof(tuple_data); 1690 tuple.TupleDataMax = sizeof(tuple_data);
1692 tuple.TupleOffset = 0; 1691 tuple.TupleOffset = 0;
1693 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
1694 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
1695 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
1696 link->conf.ConfigBase = parse.config.base;
1697 link->conf.Present = parse.config.rmask[0];
1698 1692
1699 /* Look up the current Vcc */ 1693 /* Look up the current Vcc */
1700 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf)); 1694 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 86c2ac6ae623..9d431fe7f47f 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -208,18 +208,11 @@ static int qlogic_config(struct pcmcia_device * link)
208 208
209 DEBUG(0, "qlogic_config(0x%p)\n", link); 209 DEBUG(0, "qlogic_config(0x%p)\n", link);
210 210
211 info->manf_id = link->manf_id;
212
211 tuple.TupleData = (cisdata_t *) tuple_data; 213 tuple.TupleData = (cisdata_t *) tuple_data;
212 tuple.TupleDataMax = 64; 214 tuple.TupleDataMax = 64;
213 tuple.TupleOffset = 0; 215 tuple.TupleOffset = 0;
214 tuple.DesiredTuple = CISTPL_CONFIG;
215 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
216 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
217 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
218 link->conf.ConfigBase = parse.config.base;
219
220 tuple.DesiredTuple = CISTPL_MANFID;
221 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) && (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
222 info->manf_id = le16_to_cpu(tuple.TupleData[0]);
223 216
224 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 217 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
225 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 218 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 72fe5d055de1..fb7acea60286 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -722,19 +722,11 @@ SYM53C500_config(struct pcmcia_device *link)
722 722
723 DEBUG(0, "SYM53C500_config(0x%p)\n", link); 723 DEBUG(0, "SYM53C500_config(0x%p)\n", link);
724 724
725 info->manf_id = link->manf_id;
726
725 tuple.TupleData = (cisdata_t *)tuple_data; 727 tuple.TupleData = (cisdata_t *)tuple_data;
726 tuple.TupleDataMax = 64; 728 tuple.TupleDataMax = 64;
727 tuple.TupleOffset = 0; 729 tuple.TupleOffset = 0;
728 tuple.DesiredTuple = CISTPL_CONFIG;
729 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
730 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
731 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
732 link->conf.ConfigBase = parse.config.base;
733
734 tuple.DesiredTuple = CISTPL_MANFID;
735 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
736 (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
737 info->manf_id = le16_to_cpu(tuple.TupleData[0]);
738 730
739 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 731 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
740 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 732 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
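Aside: the aha152x, fdomain, nsp_cs, qlogic and sym53c500 hunks above all drop the same boilerplate; by this kernel the PCMCIA core reads CISTPL_CONFIG (and, where used, CISTPL_MANFID) before the driver's config routine runs, filling link->conf.ConfigBase and link->manf_id itself. What remains is walking the config-table entries, roughly as sketched below with the old-style pcmcia fields seen in the diff (an outline, not any one driver's code):

	tuple_t tuple;
	u_char tuple_data[64];

	tuple.TupleData    = tuple_data;
	tuple.TupleDataMax = sizeof(tuple_data);
	tuple.TupleOffset  = 0;
	tuple.Attributes   = 0;
	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;	/* config-table entries only */
	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
	/* ... pcmcia_get_tuple_data()/pcmcia_parse_tuple(), then
	 *     pcmcia_get_next_tuple() until a usable entry is found ... */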
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 89a2a9f11e41..584ba4d6e038 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -31,7 +31,7 @@ typedef struct {
31 int base; /* Actual port address */ 31 int base; /* Actual port address */
32 int mode; /* Transfer mode */ 32 int mode; /* Transfer mode */
33 struct scsi_cmnd *cur_cmd; /* Current queued command */ 33 struct scsi_cmnd *cur_cmd; /* Current queued command */
34 struct work_struct ppa_tq; /* Polling interrupt stuff */ 34 struct delayed_work ppa_tq; /* Polling interrupt stuff */
35 unsigned long jstart; /* Jiffies at start */ 35 unsigned long jstart; /* Jiffies at start */
36 unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ 36 unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */
37 unsigned int failed:1; /* Failure flag */ 37 unsigned int failed:1; /* Failure flag */
@@ -627,9 +627,9 @@ static int ppa_completion(struct scsi_cmnd *cmd)
627 * the scheduler's task queue to generate a stream of call-backs and 627 * the scheduler's task queue to generate a stream of call-backs and
628 * complete the request when the drive is ready. 628 * complete the request when the drive is ready.
629 */ 629 */
630static void ppa_interrupt(void *data) 630static void ppa_interrupt(struct work_struct *work)
631{ 631{
632 ppa_struct *dev = (ppa_struct *) data; 632 ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
633 struct scsi_cmnd *cmd = dev->cur_cmd; 633 struct scsi_cmnd *cmd = dev->cur_cmd;
634 634
635 if (!cmd) { 635 if (!cmd) {
@@ -637,7 +637,6 @@ static void ppa_interrupt(void *data)
637 return; 637 return;
638 } 638 }
639 if (ppa_engine(dev, cmd)) { 639 if (ppa_engine(dev, cmd)) {
640 dev->ppa_tq.data = (void *) dev;
641 schedule_delayed_work(&dev->ppa_tq, 1); 640 schedule_delayed_work(&dev->ppa_tq, 1);
642 return; 641 return;
643 } 642 }
@@ -822,8 +821,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd,
822 cmd->result = DID_ERROR << 16; /* default return code */ 821 cmd->result = DID_ERROR << 16; /* default return code */
823 cmd->SCp.phase = 0; /* bus free */ 822 cmd->SCp.phase = 0; /* bus free */
824 823
825 dev->ppa_tq.data = dev; 824 schedule_delayed_work(&dev->ppa_tq, 0);
826 schedule_work(&dev->ppa_tq);
827 825
828 ppa_pb_claim(dev); 826 ppa_pb_claim(dev);
829 827
@@ -1086,7 +1084,7 @@ static int __ppa_attach(struct parport *pb)
1086 else 1084 else
1087 ports = 8; 1085 ports = 8;
1088 1086
1089 INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev); 1087 INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
1090 1088
1091 err = -ENOMEM; 1089 err = -ENOMEM;
1092 host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); 1090 host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
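Aside: the ppa changes above are the standard conversion from the old work_struct-with-data callback to struct delayed_work plus container_of(). A minimal, hedged sketch of the pattern with illustrative names:

#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work poll_work;
	int busy;
};

static void my_poll(struct work_struct *work)
{
	/* the work_struct handed in lives inside poll_work.work */
	struct my_dev *dev = container_of(work, struct my_dev, poll_work.work);

	if (dev->busy)
		schedule_delayed_work(&dev->poll_work, 1);	/* poll again next jiffy */
}

static void my_start(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->poll_work, my_poll);
	schedule_delayed_work(&dev->poll_work, 0);	/* queue immediately, cf. ppa_queuecommand */
}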
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 285c8e8ff1a0..7b18a6c7b7eb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -390,7 +390,7 @@ static struct sysfs_entry {
390 { "optrom_ctl", &sysfs_optrom_ctl_attr, }, 390 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
391 { "vpd", &sysfs_vpd_attr, 1 }, 391 { "vpd", &sysfs_vpd_attr, 1 },
392 { "sfp", &sysfs_sfp_attr, 1 }, 392 { "sfp", &sysfs_sfp_attr, 1 },
393 { 0 }, 393 { NULL },
394}; 394};
395 395
396void 396void
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 08cb5e3fb553..a823f0bc519d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -59,9 +59,6 @@ int
59qla2x00_initialize_adapter(scsi_qla_host_t *ha) 59qla2x00_initialize_adapter(scsi_qla_host_t *ha)
60{ 60{
61 int rval; 61 int rval;
62 uint8_t restart_risc = 0;
63 uint8_t retry;
64 uint32_t wait_time;
65 62
66 /* Clear adapter flags. */ 63 /* Clear adapter flags. */
67 ha->flags.online = 0; 64 ha->flags.online = 0;
@@ -104,87 +101,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
104 101
105 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 102 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
106 103
107 retry = 10; 104 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
108 /* 105 rval = ha->isp_ops.chip_diag(ha);
109 * Try to configure the loop. 106 if (rval)
110 */ 107 return (rval);
111 do { 108 rval = qla2x00_setup_chip(ha);
112 restart_risc = 0; 109 if (rval)
113 110 return (rval);
114 /* If firmware needs to be loaded */
115 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
116 if ((rval = ha->isp_ops.chip_diag(ha)) == QLA_SUCCESS) {
117 rval = qla2x00_setup_chip(ha);
118 }
119 }
120
121 if (rval == QLA_SUCCESS &&
122 (rval = qla2x00_init_rings(ha)) == QLA_SUCCESS) {
123check_fw_ready_again:
124 /*
125 * Wait for a successful LIP up to a maximum
126 * of (in seconds): RISC login timeout value,
127 * RISC retry count value, and port down retry
128 * value OR a minimum of 4 seconds OR If no
129 * cable, only 5 seconds.
130 */
131 rval = qla2x00_fw_ready(ha);
132 if (rval == QLA_SUCCESS) {
133 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
134
135 /* Issue a marker after FW becomes ready. */
136 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
137
138 /*
139 * Wait at most MAX_TARGET RSCNs for a stable
140 * link.
141 */
142 wait_time = 256;
143 do {
144 clear_bit(LOOP_RESYNC_NEEDED,
145 &ha->dpc_flags);
146 rval = qla2x00_configure_loop(ha);
147
148 if (test_and_clear_bit(ISP_ABORT_NEEDED,
149 &ha->dpc_flags)) {
150 restart_risc = 1;
151 break;
152 }
153
154 /*
155 * If loop state change while we were
156 * discoverying devices then wait for
157 * LIP to complete
158 */
159
160 if (atomic_read(&ha->loop_state) !=
161 LOOP_READY && retry--) {
162 goto check_fw_ready_again;
163 }
164 wait_time--;
165 } while (!atomic_read(&ha->loop_down_timer) &&
166 retry &&
167 wait_time &&
168 (test_bit(LOOP_RESYNC_NEEDED,
169 &ha->dpc_flags)));
170
171 if (wait_time == 0)
172 rval = QLA_FUNCTION_FAILED;
173 } else if (ha->device_flags & DFLG_NO_CABLE)
174 /* If no cable, then all is good. */
175 rval = QLA_SUCCESS;
176 }
177 } while (restart_risc && retry--);
178
179 if (rval == QLA_SUCCESS) {
180 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
181 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
182 ha->marker_needed = 0;
183
184 ha->flags.online = 1;
185 } else {
186 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
187 } 111 }
112 rval = qla2x00_init_rings(ha);
188 113
189 return (rval); 114 return (rval);
190} 115}
@@ -2208,8 +2133,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2208 2133
2209 atomic_set(&fcport->state, FCS_ONLINE); 2134 atomic_set(&fcport->state, FCS_ONLINE);
2210 2135
2211 if (ha->flags.init_done) 2136 qla2x00_reg_remote_port(ha, fcport);
2212 qla2x00_reg_remote_port(ha, fcport);
2213} 2137}
2214 2138
2215void 2139void
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 208607be78c7..cbe0cad83b68 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -95,6 +95,8 @@ MODULE_PARM_DESC(ql2xqfullrampup,
95 */ 95 */
96static int qla2xxx_slave_configure(struct scsi_device * device); 96static int qla2xxx_slave_configure(struct scsi_device * device);
97static int qla2xxx_slave_alloc(struct scsi_device *); 97static int qla2xxx_slave_alloc(struct scsi_device *);
98static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
99static void qla2xxx_scan_start(struct Scsi_Host *);
98static void qla2xxx_slave_destroy(struct scsi_device *); 100static void qla2xxx_slave_destroy(struct scsi_device *);
99static int qla2x00_queuecommand(struct scsi_cmnd *cmd, 101static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
100 void (*fn)(struct scsi_cmnd *)); 102 void (*fn)(struct scsi_cmnd *));
@@ -124,6 +126,8 @@ static struct scsi_host_template qla2x00_driver_template = {
124 126
125 .slave_alloc = qla2xxx_slave_alloc, 127 .slave_alloc = qla2xxx_slave_alloc,
126 .slave_destroy = qla2xxx_slave_destroy, 128 .slave_destroy = qla2xxx_slave_destroy,
129 .scan_finished = qla2xxx_scan_finished,
130 .scan_start = qla2xxx_scan_start,
127 .change_queue_depth = qla2x00_change_queue_depth, 131 .change_queue_depth = qla2x00_change_queue_depth,
128 .change_queue_type = qla2x00_change_queue_type, 132 .change_queue_type = qla2x00_change_queue_type,
129 .this_id = -1, 133 .this_id = -1,
@@ -287,7 +291,7 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
287 return str; 291 return str;
288} 292}
289 293
290char * 294static char *
291qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 295qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
292{ 296{
293 char un_str[10]; 297 char un_str[10];
@@ -325,7 +329,7 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
325 return (str); 329 return (str);
326} 330}
327 331
328char * 332static char *
329qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 333qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
330{ 334{
331 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 335 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
@@ -634,7 +638,7 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
634* Note: 638* Note:
635* Only return FAILED if command not returned by firmware. 639* Only return FAILED if command not returned by firmware.
636**************************************************************************/ 640**************************************************************************/
637int 641static int
638qla2xxx_eh_abort(struct scsi_cmnd *cmd) 642qla2xxx_eh_abort(struct scsi_cmnd *cmd)
639{ 643{
640 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 644 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -771,7 +775,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
771* SUCCESS/FAILURE (defined as macro in scsi.h). 775* SUCCESS/FAILURE (defined as macro in scsi.h).
772* 776*
773**************************************************************************/ 777**************************************************************************/
774int 778static int
775qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 779qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
776{ 780{
777 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 781 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -902,7 +906,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
902* SUCCESS/FAILURE (defined as macro in scsi.h). 906* SUCCESS/FAILURE (defined as macro in scsi.h).
903* 907*
904**************************************************************************/ 908**************************************************************************/
905int 909static int
906qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 910qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
907{ 911{
908 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 912 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -963,7 +967,7 @@ eh_bus_reset_done:
963* 967*
964* Note: 968* Note:
965**************************************************************************/ 969**************************************************************************/
966int 970static int
967qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 971qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
968{ 972{
969 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 973 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -1366,6 +1370,29 @@ qla24xx_disable_intrs(scsi_qla_host_t *ha)
1366 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1370 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1367} 1371}
1368 1372
1373static void
1374qla2xxx_scan_start(struct Scsi_Host *shost)
1375{
1376 scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
1377
1378 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
1379 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
1380 set_bit(RSCN_UPDATE, &ha->dpc_flags);
1381}
1382
1383static int
1384qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1385{
1386 scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
1387
1388 if (!ha->host)
1389 return 1;
1390 if (time > ha->loop_reset_delay * HZ)
1391 return 1;
1392
1393 return atomic_read(&ha->loop_state) == LOOP_READY;
1394}
1395
1369/* 1396/*
1370 * PCI driver interface 1397 * PCI driver interface
1371 */ 1398 */
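Aside: qla2xxx switches above to the midlayer's asynchronous scan hooks; the contract, roughly, is that scsi_scan_host() calls ->scan_start() once and then polls ->scan_finished(shost, elapsed) until it returns non-zero. A hedged sketch with illustrative names (the timeout constant and the discovery flag are assumptions):

static void my_scan_start(struct Scsi_Host *shost)
{
	struct my_hba *ha = (struct my_hba *)shost->hostdata;

	set_bit(MY_RESYNC_NEEDED, &ha->dpc_flags);	/* kick discovery        */
}

static int my_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct my_hba *ha = (struct my_hba *)shost->hostdata;

	if (time > MY_SCAN_TOV * HZ)			/* give up eventually    */
		return 1;
	return ha->discovery_done;			/* non-zero ends the scan */
}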
@@ -1377,10 +1404,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1377 struct Scsi_Host *host; 1404 struct Scsi_Host *host;
1378 scsi_qla_host_t *ha; 1405 scsi_qla_host_t *ha;
1379 unsigned long flags = 0; 1406 unsigned long flags = 0;
1380 unsigned long wait_switch = 0;
1381 char pci_info[20]; 1407 char pci_info[20];
1382 char fw_str[30]; 1408 char fw_str[30];
1383 fc_port_t *fcport;
1384 struct scsi_host_template *sht; 1409 struct scsi_host_template *sht;
1385 1410
1386 if (pci_enable_device(pdev)) 1411 if (pci_enable_device(pdev))
@@ -1631,30 +1656,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1631 1656
1632 ha->isp_ops.enable_intrs(ha); 1657 ha->isp_ops.enable_intrs(ha);
1633 1658
1634 /* v2.19.5b6 */
1635 /*
1636 * Wait around max loop_reset_delay secs for the devices to come
1637 * on-line. We don't want Linux scanning before we are ready.
1638 *
1639 */
1640 for (wait_switch = jiffies + (ha->loop_reset_delay * HZ);
1641 time_before(jiffies,wait_switch) &&
1642 !(ha->device_flags & (DFLG_NO_CABLE | DFLG_FABRIC_DEVICES))
1643 && (ha->device_flags & SWITCH_FOUND) ;) {
1644
1645 qla2x00_check_fabric_devices(ha);
1646
1647 msleep(10);
1648 }
1649
1650 pci_set_drvdata(pdev, ha); 1659 pci_set_drvdata(pdev, ha);
1660
1651 ha->flags.init_done = 1; 1661 ha->flags.init_done = 1;
1662 ha->flags.online = 1;
1663
1652 num_hosts++; 1664 num_hosts++;
1653 1665
1654 ret = scsi_add_host(host, &pdev->dev); 1666 ret = scsi_add_host(host, &pdev->dev);
1655 if (ret) 1667 if (ret)
1656 goto probe_failed; 1668 goto probe_failed;
1657 1669
1670 scsi_scan_host(host);
1671
1658 qla2x00_alloc_sysfs_attr(ha); 1672 qla2x00_alloc_sysfs_attr(ha);
1659 1673
1660 qla2x00_init_host_attr(ha); 1674 qla2x00_init_host_attr(ha);
@@ -1669,10 +1683,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1669 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, 1683 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
1670 ha->isp_ops.fw_version_str(ha, fw_str)); 1684 ha->isp_ops.fw_version_str(ha, fw_str));
1671 1685
1672 /* Go with fc_rport registration. */
1673 list_for_each_entry(fcport, &ha->fcports, list)
1674 qla2x00_reg_remote_port(ha, fcport);
1675
1676 return 0; 1686 return 0;
1677 1687
1678probe_failed: 1688probe_failed:
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index c71dbd5bd543..15390ad87456 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -449,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
449 return FARX_ACCESS_NVRAM_DATA | naddr; 449 return FARX_ACCESS_NVRAM_DATA | naddr;
450} 450}
451 451
452uint32_t 452static uint32_t
453qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr) 453qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
454{ 454{
455 int rval; 455 int rval;
@@ -490,7 +490,7 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
490 return dwptr; 490 return dwptr;
491} 491}
492 492
493int 493static int
494qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data) 494qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
495{ 495{
496 int rval; 496 int rval;
@@ -512,7 +512,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
512 return rval; 512 return rval;
513} 513}
514 514
515void 515static void
516qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 516qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
517 uint8_t *flash_id) 517 uint8_t *flash_id)
518{ 518{
@@ -537,7 +537,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
537 } 537 }
538} 538}
539 539
540int 540static int
541qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, 541qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
542 uint32_t dwords) 542 uint32_t dwords)
543{ 543{
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 752031fadfef..7b4e077a39c1 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -71,7 +71,7 @@ void __dump_registers(struct scsi_qla_host *ha)
71 readw(&ha->reg->u1.isp4010.nvram)); 71 readw(&ha->reg->u1.isp4010.nvram));
72 } 72 }
73 73
74 else if (is_qla4022(ha)) { 74 else if (is_qla4022(ha) | is_qla4032(ha)) {
75 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n", 75 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
76 (uint8_t) offsetof(struct isp_reg, 76 (uint8_t) offsetof(struct isp_reg,
77 u1.isp4022.intr_mask), 77 u1.isp4022.intr_mask),
@@ -119,7 +119,7 @@ void __dump_registers(struct scsi_qla_host *ha)
119 readw(&ha->reg->u2.isp4010.port_err_status)); 119 readw(&ha->reg->u2.isp4010.port_err_status));
120 } 120 }
121 121
122 else if (is_qla4022(ha)) { 122 else if (is_qla4022(ha) | is_qla4032(ha)) {
123 printk(KERN_INFO "Page 0 Registers:\n"); 123 printk(KERN_INFO "Page 0 Registers:\n");
124 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", 124 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
125 (uint8_t) offsetof(struct isp_reg, 125 (uint8_t) offsetof(struct isp_reg,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7f6c7b1c590..4249e52a5592 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -40,7 +40,11 @@
40 40
41#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022 41#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
42#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022 42#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
43#endif /* */ 43#endif
44
45#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032
46#define PCI_DEVICE_ID_QLOGIC_ISP4032 0x4032
47#endif
44 48
45#define QLA_SUCCESS 0 49#define QLA_SUCCESS 0
46#define QLA_ERROR 1 50#define QLA_ERROR 1
@@ -277,7 +281,6 @@ struct scsi_qla_host {
277#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */ 281#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
278#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 282#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
279#define AF_LINK_UP 8 /* 0x00000100 */ 283#define AF_LINK_UP 8 /* 0x00000100 */
280#define AF_TOPCAT_CHIP_PRESENT 9 /* 0x00000200 */
281#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ 284#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
282#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */ 285#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
283#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */ 286#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
@@ -317,16 +320,17 @@ struct scsi_qla_host {
317 /* NVRAM registers */ 320 /* NVRAM registers */
318 struct eeprom_data *nvram; 321 struct eeprom_data *nvram;
319 spinlock_t hardware_lock ____cacheline_aligned; 322 spinlock_t hardware_lock ____cacheline_aligned;
320 spinlock_t list_lock;
321 uint32_t eeprom_cmd_data; 323 uint32_t eeprom_cmd_data;
322 324
323 /* Counters for general statistics */ 325 /* Counters for general statistics */
326 uint64_t isr_count;
324 uint64_t adapter_error_count; 327 uint64_t adapter_error_count;
325 uint64_t device_error_count; 328 uint64_t device_error_count;
326 uint64_t total_io_count; 329 uint64_t total_io_count;
327 uint64_t total_mbytes_xferred; 330 uint64_t total_mbytes_xferred;
328 uint64_t link_failure_count; 331 uint64_t link_failure_count;
329 uint64_t invalid_crc_count; 332 uint64_t invalid_crc_count;
333 uint32_t bytes_xfered;
330 uint32_t spurious_int_count; 334 uint32_t spurious_int_count;
331 uint32_t aborted_io_count; 335 uint32_t aborted_io_count;
332 uint32_t io_timeout_count; 336 uint32_t io_timeout_count;
@@ -438,6 +442,11 @@ static inline int is_qla4022(struct scsi_qla_host *ha)
438 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022; 442 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
439} 443}
440 444
445static inline int is_qla4032(struct scsi_qla_host *ha)
446{
447 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
448}
449
441static inline int adapter_up(struct scsi_qla_host *ha) 450static inline int adapter_up(struct scsi_qla_host *ha)
442{ 451{
443 return (test_bit(AF_ONLINE, &ha->flags) != 0) && 452 return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
@@ -451,58 +460,58 @@ static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
451 460
452static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha) 461static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
453{ 462{
454 return (is_qla4022(ha) ? 463 return (is_qla4010(ha) ?
455 &ha->reg->u1.isp4022.semaphore : 464 &ha->reg->u1.isp4010.nvram :
456 &ha->reg->u1.isp4010.nvram); 465 &ha->reg->u1.isp4022.semaphore);
457} 466}
458 467
459static inline void __iomem* isp_nvram(struct scsi_qla_host *ha) 468static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
460{ 469{
461 return (is_qla4022(ha) ? 470 return (is_qla4010(ha) ?
462 &ha->reg->u1.isp4022.nvram : 471 &ha->reg->u1.isp4010.nvram :
463 &ha->reg->u1.isp4010.nvram); 472 &ha->reg->u1.isp4022.nvram);
464} 473}
465 474
466static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha) 475static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
467{ 476{
468 return (is_qla4022(ha) ? 477 return (is_qla4010(ha) ?
469 &ha->reg->u2.isp4022.p0.ext_hw_conf : 478 &ha->reg->u2.isp4010.ext_hw_conf :
470 &ha->reg->u2.isp4010.ext_hw_conf); 479 &ha->reg->u2.isp4022.p0.ext_hw_conf);
471} 480}
472 481
473static inline void __iomem* isp_port_status(struct scsi_qla_host *ha) 482static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
474{ 483{
475 return (is_qla4022(ha) ? 484 return (is_qla4010(ha) ?
476 &ha->reg->u2.isp4022.p0.port_status : 485 &ha->reg->u2.isp4010.port_status :
477 &ha->reg->u2.isp4010.port_status); 486 &ha->reg->u2.isp4022.p0.port_status);
478} 487}
479 488
480static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha) 489static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
481{ 490{
482 return (is_qla4022(ha) ? 491 return (is_qla4010(ha) ?
483 &ha->reg->u2.isp4022.p0.port_ctrl : 492 &ha->reg->u2.isp4010.port_ctrl :
484 &ha->reg->u2.isp4010.port_ctrl); 493 &ha->reg->u2.isp4022.p0.port_ctrl);
485} 494}
486 495
487static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha) 496static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
488{ 497{
489 return (is_qla4022(ha) ? 498 return (is_qla4010(ha) ?
490 &ha->reg->u2.isp4022.p0.port_err_status : 499 &ha->reg->u2.isp4010.port_err_status :
491 &ha->reg->u2.isp4010.port_err_status); 500 &ha->reg->u2.isp4022.p0.port_err_status);
492} 501}
493 502
494static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha) 503static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
495{ 504{
496 return (is_qla4022(ha) ? 505 return (is_qla4010(ha) ?
497 &ha->reg->u2.isp4022.p0.gp_out : 506 &ha->reg->u2.isp4010.gp_out :
498 &ha->reg->u2.isp4010.gp_out); 507 &ha->reg->u2.isp4022.p0.gp_out);
499} 508}
500 509
501static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha) 510static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
502{ 511{
503 return (is_qla4022(ha) ? 512 return (is_qla4010(ha) ?
504 offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 : 513 offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 :
505 offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2); 514 offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2);
506} 515}
507 516
508int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits); 517int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
@@ -511,59 +520,59 @@ int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
511 520
512static inline int ql4xxx_lock_flash(struct scsi_qla_host *a) 521static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
513{ 522{
514 if (is_qla4022(a)) 523 if (is_qla4010(a))
524 return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
525 QL4010_FLASH_SEM_BITS);
526 else
515 return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK, 527 return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
516 (QL4022_RESOURCE_BITS_BASE_CODE | 528 (QL4022_RESOURCE_BITS_BASE_CODE |
517 (a->mac_index)) << 13); 529 (a->mac_index)) << 13);
518 else
519 return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
520 QL4010_FLASH_SEM_BITS);
521} 530}
522 531
523static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a) 532static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
524{ 533{
525 if (is_qla4022(a)) 534 if (is_qla4010(a))
526 ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
527 else
528 ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK); 535 ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
536 else
537 ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
529} 538}
530 539
531static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a) 540static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
532{ 541{
533 if (is_qla4022(a)) 542 if (is_qla4010(a))
543 return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
544 QL4010_NVRAM_SEM_BITS);
545 else
534 return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK, 546 return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
535 (QL4022_RESOURCE_BITS_BASE_CODE | 547 (QL4022_RESOURCE_BITS_BASE_CODE |
536 (a->mac_index)) << 10); 548 (a->mac_index)) << 10);
537 else
538 return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
539 QL4010_NVRAM_SEM_BITS);
540} 549}
541 550
542static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a) 551static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
543{ 552{
544 if (is_qla4022(a)) 553 if (is_qla4010(a))
545 ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
546 else
547 ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK); 554 ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
555 else
556 ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
548} 557}
549 558
550static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a) 559static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
551{ 560{
552 if (is_qla4022(a)) 561 if (is_qla4010(a))
562 return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
563 QL4010_DRVR_SEM_BITS);
564 else
553 return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK, 565 return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
554 (QL4022_RESOURCE_BITS_BASE_CODE | 566 (QL4022_RESOURCE_BITS_BASE_CODE |
555 (a->mac_index)) << 1); 567 (a->mac_index)) << 1);
556 else
557 return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
558 QL4010_DRVR_SEM_BITS);
559} 568}
560 569
561static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a) 570static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
562{ 571{
563 if (is_qla4022(a)) 572 if (is_qla4010(a))
564 ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
565 else
566 ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK); 573 ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
574 else
575 ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
567} 576}
568 577
569/*---------------------------------------------------------------------------*/ 578/*---------------------------------------------------------------------------*/
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 427489de64bc..4eea8c571916 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -296,7 +296,6 @@ static inline uint32_t clr_rmask(uint32_t val)
296/* ISP Semaphore definitions */ 296/* ISP Semaphore definitions */
297 297
298/* ISP General Purpose Output definitions */ 298/* ISP General Purpose Output definitions */
299#define GPOR_TOPCAT_RESET 0x00000004
300 299
301/* shadow registers (DMA'd from HA to system memory. read only) */ 300/* shadow registers (DMA'd from HA to system memory. read only) */
302struct shadow_regs { 301struct shadow_regs {
@@ -339,10 +338,13 @@ union external_hw_config_reg {
339/* Mailbox command definitions */ 338/* Mailbox command definitions */
340#define MBOX_CMD_ABOUT_FW 0x0009 339#define MBOX_CMD_ABOUT_FW 0x0009
341#define MBOX_CMD_LUN_RESET 0x0016 340#define MBOX_CMD_LUN_RESET 0x0016
341#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
342#define MBOX_CMD_GET_FW_STATUS 0x001F 342#define MBOX_CMD_GET_FW_STATUS 0x001F
343#define MBOX_CMD_SET_ISNS_SERVICE 0x0021 343#define MBOX_CMD_SET_ISNS_SERVICE 0x0021
344#define ISNS_DISABLE 0 344#define ISNS_DISABLE 0
345#define ISNS_ENABLE 1 345#define ISNS_ENABLE 1
346#define MBOX_CMD_COPY_FLASH 0x0024
347#define MBOX_CMD_WRITE_FLASH 0x0025
346#define MBOX_CMD_READ_FLASH 0x0026 348#define MBOX_CMD_READ_FLASH 0x0026
347#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031 349#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
348#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056 350#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
@@ -360,10 +362,13 @@ union external_hw_config_reg {
360#define DDB_DS_SESSION_FAILED 0x06 362#define DDB_DS_SESSION_FAILED 0x06
361#define DDB_DS_LOGIN_IN_PROCESS 0x07 363#define DDB_DS_LOGIN_IN_PROCESS 0x07
362#define MBOX_CMD_GET_FW_STATE 0x0069 364#define MBOX_CMD_GET_FW_STATE 0x0069
365#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
366#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
363 367
364/* Mailbox 1 */ 368/* Mailbox 1 */
365#define FW_STATE_READY 0x0000 369#define FW_STATE_READY 0x0000
366#define FW_STATE_CONFIG_WAIT 0x0001 370#define FW_STATE_CONFIG_WAIT 0x0001
371#define FW_STATE_WAIT_LOGIN 0x0002
367#define FW_STATE_ERROR 0x0004 372#define FW_STATE_ERROR 0x0004
368#define FW_STATE_DHCP_IN_PROGRESS 0x0008 373#define FW_STATE_DHCP_IN_PROGRESS 0x0008
369 374
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 1b221ff0f6f7..2122967bbf0b 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -8,6 +8,7 @@
8#ifndef __QLA4x_GBL_H 8#ifndef __QLA4x_GBL_H
9#define __QLA4x_GBL_H 9#define __QLA4x_GBL_H
10 10
11int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
11int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port); 12int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
12int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb); 13int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
13int qla4xxx_initialize_adapter(struct scsi_qla_host * ha, 14int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
@@ -75,4 +76,4 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
75extern int ql4xextended_error_logging; 76extern int ql4xextended_error_logging;
76extern int ql4xdiscoverywait; 77extern int ql4xdiscoverywait;
77extern int ql4xdontresethba; 78extern int ql4xdontresethba;
78#endif /* _QLA4x_GBL_H */ 79#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index bb3a1c11f44c..cc210f297a78 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -259,10 +259,16 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
259 "seconds expired= %d\n", ha->host_no, __func__, 259 "seconds expired= %d\n", ha->host_no, __func__,
260 ha->firmware_state, ha->addl_fw_state, 260 ha->firmware_state, ha->addl_fw_state,
261 timeout_count)); 261 timeout_count));
262 if (is_qla4032(ha) &&
263 !(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) &&
264 (timeout_count < ADAPTER_INIT_TOV - 5)) {
265 break;
266 }
267
262 msleep(1000); 268 msleep(1000);
263 } /* end of for */ 269 } /* end of for */
264 270
265 if (timeout_count <= 0) 271 if (timeout_count == 0)
266 DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n", 272 DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
267 ha->host_no, __func__)); 273 ha->host_no, __func__));
268 274
@@ -806,32 +812,6 @@ int qla4xxx_relogin_device(struct scsi_qla_host *ha,
806 return QLA_SUCCESS; 812 return QLA_SUCCESS;
807} 813}
808 814
809/**
810 * qla4010_get_topcat_presence - check if it is QLA4040 TopCat Chip
811 * @ha: Pointer to host adapter structure.
812 *
813 **/
814static int qla4010_get_topcat_presence(struct scsi_qla_host *ha)
815{
816 unsigned long flags;
817 uint16_t topcat;
818
819 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS)
820 return QLA_ERROR;
821 spin_lock_irqsave(&ha->hardware_lock, flags);
822 topcat = rd_nvram_word(ha, offsetof(struct eeprom_data,
823 isp4010.topcat));
824 spin_unlock_irqrestore(&ha->hardware_lock, flags);
825
826 if ((topcat & TOPCAT_MASK) == TOPCAT_PRESENT)
827 set_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
828 else
829 clear_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
830 ql4xxx_unlock_nvram(ha);
831 return QLA_SUCCESS;
832}
833
834
835static int qla4xxx_config_nvram(struct scsi_qla_host *ha) 815static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
836{ 816{
837 unsigned long flags; 817 unsigned long flags;
@@ -866,7 +846,7 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
866 /* set defaults */ 846 /* set defaults */
867 if (is_qla4010(ha)) 847 if (is_qla4010(ha))
868 extHwConfig.Asuint32_t = 0x1912; 848 extHwConfig.Asuint32_t = 0x1912;
869 else if (is_qla4022(ha)) 849 else if (is_qla4022(ha) | is_qla4032(ha))
870 extHwConfig.Asuint32_t = 0x0023; 850 extHwConfig.Asuint32_t = 0x0023;
871 } 851 }
872 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", 852 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
@@ -927,7 +907,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
927 907
928 spin_lock_irqsave(&ha->hardware_lock, flags); 908 spin_lock_irqsave(&ha->hardware_lock, flags);
929 writel(jiffies, &ha->reg->mailbox[7]); 909 writel(jiffies, &ha->reg->mailbox[7]);
930 if (is_qla4022(ha)) 910 if (is_qla4022(ha) | is_qla4032(ha))
931 writel(set_rmask(NVR_WRITE_ENABLE), 911 writel(set_rmask(NVR_WRITE_ENABLE),
932 &ha->reg->u1.isp4022.nvram); 912 &ha->reg->u1.isp4022.nvram);
933 913
@@ -978,7 +958,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
978 return status; 958 return status;
979} 959}
980 960
981static int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a) 961int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
982{ 962{
983#define QL4_LOCK_DRVR_WAIT 300 963#define QL4_LOCK_DRVR_WAIT 300
984#define QL4_LOCK_DRVR_SLEEP 100 964#define QL4_LOCK_DRVR_SLEEP 100
@@ -1018,12 +998,7 @@ static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
1018 int soft_reset = 1; 998 int soft_reset = 1;
1019 int config_chip = 0; 999 int config_chip = 0;
1020 1000
1021 if (is_qla4010(ha)){ 1001 if (is_qla4022(ha) | is_qla4032(ha))
1022 if (qla4010_get_topcat_presence(ha) != QLA_SUCCESS)
1023 return QLA_ERROR;
1024 }
1025
1026 if (is_qla4022(ha))
1027 ql4xxx_set_mac_number(ha); 1002 ql4xxx_set_mac_number(ha);
1028 1003
1029 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 1004 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 0d61797af7da..6375eb017dd3 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -38,7 +38,7 @@ qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
38static inline void 38static inline void
39__qla4xxx_enable_intrs(struct scsi_qla_host *ha) 39__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
40{ 40{
41 if (is_qla4022(ha)) { 41 if (is_qla4022(ha) | is_qla4032(ha)) {
42 writel(set_rmask(IMR_SCSI_INTR_ENABLE), 42 writel(set_rmask(IMR_SCSI_INTR_ENABLE),
43 &ha->reg->u1.isp4022.intr_mask); 43 &ha->reg->u1.isp4022.intr_mask);
44 readl(&ha->reg->u1.isp4022.intr_mask); 44 readl(&ha->reg->u1.isp4022.intr_mask);
@@ -52,7 +52,7 @@ __qla4xxx_enable_intrs(struct scsi_qla_host *ha)
52static inline void 52static inline void
53__qla4xxx_disable_intrs(struct scsi_qla_host *ha) 53__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
54{ 54{
55 if (is_qla4022(ha)) { 55 if (is_qla4022(ha) | is_qla4032(ha)) {
56 writel(clr_rmask(IMR_SCSI_INTR_ENABLE), 56 writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
57 &ha->reg->u1.isp4022.intr_mask); 57 &ha->reg->u1.isp4022.intr_mask);
58 readl(&ha->reg->u1.isp4022.intr_mask); 58 readl(&ha->reg->u1.isp4022.intr_mask);
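The is_qla4022()/is_qla4032() pair above is combined with a bitwise '|' throughout this patch; since both macros evaluate to 0 or 1 this happens to behave like a logical OR. A hypothetical helper (not part of the patch) would make the intent explicit:

static inline int is_qla4022_or_4032(struct scsi_qla_host *ha)
{
	return is_qla4022(ha) || is_qla4032(ha);
}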
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index c0a254b89a30..d41ce380eedc 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -294,6 +294,12 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
294 cmd_entry->control_flags = CF_WRITE; 294 cmd_entry->control_flags = CF_WRITE;
295 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 295 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
296 cmd_entry->control_flags = CF_READ; 296 cmd_entry->control_flags = CF_READ;
297
298 ha->bytes_xfered += cmd->request_bufflen;
299 if (ha->bytes_xfered & ~0xFFFFF){
300 ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
301 ha->bytes_xfered &= 0xFFFFF;
302 }
297 } 303 }
298 304
299 /* Set tagged queueing control flags */ 305 /* Set tagged queueing control flags */
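The accounting added to qla4xxx_send_command_to_isp() keeps a running byte count and folds whole megabytes into total_mbytes_xferred once bit 20 or above becomes set. A minimal stand-alone sketch of the same arithmetic (names are illustrative, not from the driver):

#include <stdint.h>

static void account_xfer(uint32_t *bytes, uint64_t *mbytes, uint32_t request_len)
{
	*bytes += request_len;
	if (*bytes & ~0xFFFFF) {		/* at least 1 MB accumulated   */
		*mbytes += *bytes >> 20;	/* move whole megabytes over   */
		*bytes &= 0xFFFFF;		/* keep the sub-MB remainder   */
	}
}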
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 1e283321a59d..ef975e0dc87f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -627,6 +627,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
627 627
628 spin_lock_irqsave(&ha->hardware_lock, flags); 628 spin_lock_irqsave(&ha->hardware_lock, flags);
629 629
630 ha->isr_count++;
630 /* 631 /*
631 * Repeatedly service interrupts up to a maximum of 632 * Repeatedly service interrupts up to a maximum of
632 * MAX_REQS_SERVICED_PER_INTR 633 * MAX_REQS_SERVICED_PER_INTR
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index e3957ca5b645..58afd135aa1d 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -7,15 +7,22 @@
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9 9
10static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
11{
12 writel(cmd, isp_nvram(ha));
13 readl(isp_nvram(ha));
14 udelay(1);
15}
16
10static inline int eeprom_size(struct scsi_qla_host *ha) 17static inline int eeprom_size(struct scsi_qla_host *ha)
11{ 18{
12 return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16; 19 return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16;
13} 20}
14 21
15static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha) 22static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
16{ 23{
17 return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 : 24 return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 :
18 FM93C56A_NO_ADDR_BITS_16; 25 FM93C86A_NO_ADDR_BITS_16 ;
19} 26}
20 27
21static inline int eeprom_no_data_bits(struct scsi_qla_host *ha) 28static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
@@ -28,8 +35,7 @@ static int fm93c56a_select(struct scsi_qla_host * ha)
28 DEBUG5(printk(KERN_ERR "fm93c56a_select:\n")); 35 DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
29 36
30 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000; 37 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
31 writel(ha->eeprom_cmd_data, isp_nvram(ha)); 38 eeprom_cmd(ha->eeprom_cmd_data, ha);
32 readl(isp_nvram(ha));
33 return 1; 39 return 1;
34} 40}
35 41
@@ -41,12 +47,13 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
41 int previousBit; 47 int previousBit;
42 48
43 /* Clock in a zero, then do the start bit. */ 49 /* Clock in a zero, then do the start bit. */
44 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha)); 50 eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);
45 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 51
46 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha)); 52 eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
47 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 53 AUBURN_EEPROM_CLK_RISE, ha);
48 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha)); 54 eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
49 readl(isp_nvram(ha)); 55 AUBURN_EEPROM_CLK_FALL, ha);
56
50 mask = 1 << (FM93C56A_CMD_BITS - 1); 57 mask = 1 << (FM93C56A_CMD_BITS - 1);
51 58
52 /* Force the previous data bit to be different. */ 59 /* Force the previous data bit to be different. */
@@ -60,14 +67,14 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
60 * If the bit changed, then change the DO state to 67 * If the bit changed, then change the DO state to
61 * match. 68 * match.
62 */ 69 */
63 writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha)); 70 eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
64 previousBit = dataBit; 71 previousBit = dataBit;
65 } 72 }
66 writel(ha->eeprom_cmd_data | dataBit | 73 eeprom_cmd(ha->eeprom_cmd_data | dataBit |
67 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha)); 74 AUBURN_EEPROM_CLK_RISE, ha);
68 writel(ha->eeprom_cmd_data | dataBit | 75 eeprom_cmd(ha->eeprom_cmd_data | dataBit |
69 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha)); 76 AUBURN_EEPROM_CLK_FALL, ha);
70 readl(isp_nvram(ha)); 77
71 cmd = cmd << 1; 78 cmd = cmd << 1;
72 } 79 }
73 mask = 1 << (eeprom_no_addr_bits(ha) - 1); 80 mask = 1 << (eeprom_no_addr_bits(ha) - 1);
@@ -82,14 +89,15 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
82 * If the bit changed, then change the DO state to 89 * If the bit changed, then change the DO state to
83 * match. 90 * match.
84 */ 91 */
85 writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha)); 92 eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
93
86 previousBit = dataBit; 94 previousBit = dataBit;
87 } 95 }
88 writel(ha->eeprom_cmd_data | dataBit | 96 eeprom_cmd(ha->eeprom_cmd_data | dataBit |
89 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha)); 97 AUBURN_EEPROM_CLK_RISE, ha);
90 writel(ha->eeprom_cmd_data | dataBit | 98 eeprom_cmd(ha->eeprom_cmd_data | dataBit |
91 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha)); 99 AUBURN_EEPROM_CLK_FALL, ha);
92 readl(isp_nvram(ha)); 100
93 addr = addr << 1; 101 addr = addr << 1;
94 } 102 }
95 return 1; 103 return 1;
@@ -98,8 +106,7 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
98static int fm93c56a_deselect(struct scsi_qla_host * ha) 106static int fm93c56a_deselect(struct scsi_qla_host * ha)
99{ 107{
100 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000; 108 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
101 writel(ha->eeprom_cmd_data, isp_nvram(ha)); 109 eeprom_cmd(ha->eeprom_cmd_data, ha);
102 readl(isp_nvram(ha));
103 return 1; 110 return 1;
104} 111}
105 112
@@ -112,12 +119,13 @@ static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
112 /* Read the data bits 119 /* Read the data bits
113 * The first bit is a dummy. Clock right over it. */ 120 * The first bit is a dummy. Clock right over it. */
114 for (i = 0; i < eeprom_no_data_bits(ha); i++) { 121 for (i = 0; i < eeprom_no_data_bits(ha); i++) {
115 writel(ha->eeprom_cmd_data | 122 eeprom_cmd(ha->eeprom_cmd_data |
116 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha)); 123 AUBURN_EEPROM_CLK_RISE, ha);
117 writel(ha->eeprom_cmd_data | 124 eeprom_cmd(ha->eeprom_cmd_data |
118 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha)); 125 AUBURN_EEPROM_CLK_FALL, ha);
119 dataBit = 126
120 (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0; 127 dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
128
121 data = (data << 1) | dataBit; 129 data = (data << 1) | dataBit;
122 } 130 }
123 131
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index 08e2aed8c6cc..b47b4fc59d83 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -134,9 +134,7 @@ struct eeprom_data {
134 u16 phyConfig; /* x36 */ 134 u16 phyConfig; /* x36 */
135#define PHY_CONFIG_PHY_ADDR_MASK 0x1f 135#define PHY_CONFIG_PHY_ADDR_MASK 0x1f
136#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20 136#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
137 u16 topcat; /* x38 */ 137 u16 reserved_56; /* x38 */
138#define TOPCAT_PRESENT 0x0100
139#define TOPCAT_MASK 0xFF00
140 138
141#define EEPROM_UNUSED_1_SIZE 2 139#define EEPROM_UNUSED_1_SIZE 2
142 u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */ 140 u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5b8db6109536..969c9e431028 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -708,10 +708,10 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
708} 708}
709 709
710/** 710/**
711 * qla4010_soft_reset - performs soft reset. 711 * qla4xxx_soft_reset - performs soft reset.
712 * @ha: Pointer to host adapter structure. 712 * @ha: Pointer to host adapter structure.
713 **/ 713 **/
714static int qla4010_soft_reset(struct scsi_qla_host *ha) 714int qla4xxx_soft_reset(struct scsi_qla_host *ha)
715{ 715{
716 uint32_t max_wait_time; 716 uint32_t max_wait_time;
717 unsigned long flags = 0; 717 unsigned long flags = 0;
@@ -817,29 +817,6 @@ static int qla4010_soft_reset(struct scsi_qla_host *ha)
817} 817}
818 818
819/** 819/**
820 * qla4xxx_topcat_reset - performs hard reset of TopCat Chip.
821 * @ha: Pointer to host adapter structure.
822 **/
823static int qla4xxx_topcat_reset(struct scsi_qla_host *ha)
824{
825 unsigned long flags;
826
827 ql4xxx_lock_nvram(ha);
828 spin_lock_irqsave(&ha->hardware_lock, flags);
829 writel(set_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
830 readl(isp_gp_out(ha));
831 mdelay(1);
832
833 writel(clr_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
834 readl(isp_gp_out(ha));
835 spin_unlock_irqrestore(&ha->hardware_lock, flags);
836 mdelay(2523);
837
838 ql4xxx_unlock_nvram(ha);
839 return QLA_SUCCESS;
840}
841
842/**
843 * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S. 820 * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S.
844 * @ha: Pointer to host adapter structure. 821 * @ha: Pointer to host adapter structure.
845 * 822 *
@@ -867,26 +844,6 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
867} 844}
868 845
869/** 846/**
870 * qla4xxx_hard_reset - performs HBA Hard Reset
871 * @ha: Pointer to host adapter structure.
872 **/
873static int qla4xxx_hard_reset(struct scsi_qla_host *ha)
874{
875 /* The QLA4010 really doesn't have an equivalent to a hard reset */
876 qla4xxx_flush_active_srbs(ha);
877 if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
878 int status = QLA_ERROR;
879
880 if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
881 (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
882 (qla4010_soft_reset(ha) == QLA_SUCCESS))
883 status = QLA_SUCCESS;
884 return status;
885 } else
886 return qla4010_soft_reset(ha);
887}
888
889/**
890 * qla4xxx_recover_adapter - recovers adapter after a fatal error 847 * qla4xxx_recover_adapter - recovers adapter after a fatal error
891 * @ha: Pointer to host adapter structure. 848 * @ha: Pointer to host adapter structure.
892 * @renew_ddb_list: Indicates what to do with the adapter's ddb list 849 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
@@ -919,18 +876,11 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
919 if (status == QLA_SUCCESS) { 876 if (status == QLA_SUCCESS) {
920 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", 877 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
921 ha->host_no, __func__)); 878 ha->host_no, __func__));
922 status = qla4xxx_soft_reset(ha); 879 qla4xxx_flush_active_srbs(ha);
923 } 880 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
924 /* FIXMEkaren: Do we want to keep interrupts enabled and process 881 status = qla4xxx_soft_reset(ha);
925 AENs after soft reset */ 882 else
926 883 status = QLA_ERROR;
927 /* If firmware (SOFT) reset failed, or if all outstanding
928 * commands have not returned, then do a HARD reset.
929 */
930 if (status == QLA_ERROR) {
931 DEBUG2(printk("scsi%ld: %s - Performing hard reset..\n",
932 ha->host_no, __func__));
933 status = qla4xxx_hard_reset(ha);
934 } 884 }
935 885
936 /* Flush any pending ddb changed AENs */ 886 /* Flush any pending ddb changed AENs */
@@ -1011,18 +961,15 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
1011 * the mid-level tries to sleep when it reaches the driver threshold 961 * the mid-level tries to sleep when it reaches the driver threshold
1012 * "host->can_queue". This can cause a panic if we were in our interrupt code. 962 * "host->can_queue". This can cause a panic if we were in our interrupt code.
1013 **/ 963 **/
1014static void qla4xxx_do_dpc(void *data) 964static void qla4xxx_do_dpc(struct work_struct *work)
1015{ 965{
1016 struct scsi_qla_host *ha = (struct scsi_qla_host *) data; 966 struct scsi_qla_host *ha =
967 container_of(work, struct scsi_qla_host, dpc_work);
1017 struct ddb_entry *ddb_entry, *dtemp; 968 struct ddb_entry *ddb_entry, *dtemp;
1018 969
1019 DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n", 970 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
1020 ha->host_no, __func__)); 971 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
1021 972 ha->host_no, __func__, ha->flags, ha->dpc_flags));
1022 DEBUG2(printk("scsi%ld: %s: ha->flags = 0x%08lx\n",
1023 ha->host_no, __func__, ha->flags));
1024 DEBUG2(printk("scsi%ld: %s: ha->dpc_flags = 0x%08lx\n",
1025 ha->host_no, __func__, ha->dpc_flags));
1026 973
1027 /* Initialization not yet finished. Don't do anything yet. */ 974 /* Initialization not yet finished. Don't do anything yet. */
1028 if (!test_bit(AF_INIT_DONE, &ha->flags)) 975 if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -1032,16 +979,8 @@ static void qla4xxx_do_dpc(void *data)
1032 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 979 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1033 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 980 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1034 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) { 981 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
1035 if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) 982 if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
1036 /* 983 test_bit(DPC_RESET_HA, &ha->dpc_flags))
1037 * dg 09/23 Never initialize ddb list
1038 * once we up and running
1039 * qla4xxx_recover_adapter(ha,
1040 * REBUILD_DDB_LIST);
1041 */
1042 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
1043
1044 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1045 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST); 984 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
1046 985
1047 if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 986 if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
@@ -1122,7 +1061,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1122 destroy_workqueue(ha->dpc_thread); 1061 destroy_workqueue(ha->dpc_thread);
1123 1062
1124 /* Issue Soft Reset to put firmware in unknown state */ 1063 /* Issue Soft Reset to put firmware in unknown state */
1125 qla4xxx_soft_reset(ha); 1064 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
1065 qla4xxx_soft_reset(ha);
1126 1066
1127 /* Remove timer thread, if present */ 1067 /* Remove timer thread, if present */
1128 if (ha->timer_active) 1068 if (ha->timer_active)
@@ -1261,7 +1201,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1261 init_waitqueue_head(&ha->mailbox_wait_queue); 1201 init_waitqueue_head(&ha->mailbox_wait_queue);
1262 1202
1263 spin_lock_init(&ha->hardware_lock); 1203 spin_lock_init(&ha->hardware_lock);
1264 spin_lock_init(&ha->list_lock);
1265 1204
1266 /* Allocate dma buffers */ 1205 /* Allocate dma buffers */
1267 if (qla4xxx_mem_alloc(ha)) { 1206 if (qla4xxx_mem_alloc(ha)) {
@@ -1315,7 +1254,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1315 ret = -ENODEV; 1254 ret = -ENODEV;
1316 goto probe_failed; 1255 goto probe_failed;
1317 } 1256 }
1318 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha); 1257 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
1319 1258
1320 ret = request_irq(pdev->irq, qla4xxx_intr_handler, 1259 ret = request_irq(pdev->irq, qla4xxx_intr_handler,
1321 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha); 1260 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
@@ -1468,27 +1407,6 @@ struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t in
1468} 1407}
1469 1408
1470/** 1409/**
1471 * qla4xxx_soft_reset - performs a SOFT RESET of hba.
1472 * @ha: Pointer to host adapter structure.
1473 **/
1474int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1475{
1476
1477 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: chip reset!\n", ha->host_no,
1478 __func__));
1479 if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
1480 int status = QLA_ERROR;
1481
1482 if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
1483 (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
1484 (qla4010_soft_reset(ha) == QLA_SUCCESS) )
1485 status = QLA_SUCCESS;
1486 return status;
1487 } else
1488 return qla4010_soft_reset(ha);
1489}
1490
1491/**
1492 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 1410 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
1493 * @ha: actual ha whose done queue will contain the cmd returned by firmware. 1411
1494 * @cmd: Scsi Command to wait on. 1412 * @cmd: Scsi Command to wait on.
@@ -1686,6 +1604,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
1686 .subvendor = PCI_ANY_ID, 1604 .subvendor = PCI_ANY_ID,
1687 .subdevice = PCI_ANY_ID, 1605 .subdevice = PCI_ANY_ID,
1688 }, 1606 },
1607 {
1608 .vendor = PCI_VENDOR_ID_QLOGIC,
1609 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
1610 .subvendor = PCI_ANY_ID,
1611 .subdevice = PCI_ANY_ID,
1612 },
1689 {0, 0}, 1613 {0, 0},
1690}; 1614};
1691MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 1615MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index b3fe7e68988e..454e19c8ad68 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,9 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.00.05b9-k" 8#define QLA4XXX_DRIVER_VERSION "5.00.07-k"
9
10#define QL4_DRIVER_MAJOR_VER 5
11#define QL4_DRIVER_MINOR_VER 0
12#define QL4_DRIVER_PATCH_VER 5
13#define QL4_DRIVER_BETA_VER 9
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c59f31533ab4..fafc00deaade 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -156,8 +156,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
156 156
157static DEFINE_MUTEX(host_cmd_pool_mutex); 157static DEFINE_MUTEX(host_cmd_pool_mutex);
158 158
159static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, 159struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
160 gfp_t gfp_mask)
161{ 160{
162 struct scsi_cmnd *cmd; 161 struct scsi_cmnd *cmd;
163 162
@@ -178,6 +177,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
178 177
179 return cmd; 178 return cmd;
180} 179}
180EXPORT_SYMBOL_GPL(__scsi_get_command);
181 181
182/* 182/*
183 * Function: scsi_get_command() 183 * Function: scsi_get_command()
@@ -214,9 +214,29 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
214 put_device(&dev->sdev_gendev); 214 put_device(&dev->sdev_gendev);
215 215
216 return cmd; 216 return cmd;
217} 217}
218EXPORT_SYMBOL(scsi_get_command); 218EXPORT_SYMBOL(scsi_get_command);
219 219
220void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
221 struct device *dev)
222{
223 unsigned long flags;
224
225 /* changing locks here, don't need to restore the irq state */
226 spin_lock_irqsave(&shost->free_list_lock, flags);
227 if (unlikely(list_empty(&shost->free_list))) {
228 list_add(&cmd->list, &shost->free_list);
229 cmd = NULL;
230 }
231 spin_unlock_irqrestore(&shost->free_list_lock, flags);
232
233 if (likely(cmd != NULL))
234 kmem_cache_free(shost->cmd_pool->slab, cmd);
235
236 put_device(dev);
237}
238EXPORT_SYMBOL(__scsi_put_command);
239
220/* 240/*
221 * Function: scsi_put_command() 241 * Function: scsi_put_command()
222 * 242 *
@@ -231,26 +251,15 @@ EXPORT_SYMBOL(scsi_get_command);
231void scsi_put_command(struct scsi_cmnd *cmd) 251void scsi_put_command(struct scsi_cmnd *cmd)
232{ 252{
233 struct scsi_device *sdev = cmd->device; 253 struct scsi_device *sdev = cmd->device;
234 struct Scsi_Host *shost = sdev->host;
235 unsigned long flags; 254 unsigned long flags;
236 255
237 /* serious error if the command hasn't come from a device list */ 256 /* serious error if the command hasn't come from a device list */
238 spin_lock_irqsave(&cmd->device->list_lock, flags); 257 spin_lock_irqsave(&cmd->device->list_lock, flags);
239 BUG_ON(list_empty(&cmd->list)); 258 BUG_ON(list_empty(&cmd->list));
240 list_del_init(&cmd->list); 259 list_del_init(&cmd->list);
241 spin_unlock(&cmd->device->list_lock); 260 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
242 /* changing locks here, don't need to restore the irq state */
243 spin_lock(&shost->free_list_lock);
244 if (unlikely(list_empty(&shost->free_list))) {
245 list_add(&cmd->list, &shost->free_list);
246 cmd = NULL;
247 }
248 spin_unlock_irqrestore(&shost->free_list_lock, flags);
249
250 if (likely(cmd != NULL))
251 kmem_cache_free(shost->cmd_pool->slab, cmd);
252 261
253 put_device(&sdev->sdev_gendev); 262 __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
254} 263}
255EXPORT_SYMBOL(scsi_put_command); 264EXPORT_SYMBOL(scsi_put_command);
256 265
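With __scsi_get_command() and __scsi_put_command() exported above, code that has no scsi_device (such as target-mode drivers) can allocate commands directly against a host. A rough usage sketch, assuming the caller has already taken its own reference on the struct device it passes in:

	struct scsi_cmnd *cmd = __scsi_get_command(shost, GFP_KERNEL);
	if (cmd) {
		/* ... fill in and issue the command ... */
		__scsi_put_command(shost, cmd, dev);	/* drops the caller's device ref */
	}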
@@ -871,9 +880,9 @@ EXPORT_SYMBOL(scsi_device_get);
871 */ 880 */
872void scsi_device_put(struct scsi_device *sdev) 881void scsi_device_put(struct scsi_device *sdev)
873{ 882{
883#ifdef CONFIG_MODULE_UNLOAD
874 struct module *module = sdev->host->hostt->module; 884 struct module *module = sdev->host->hostt->module;
875 885
876#ifdef CONFIG_MODULE_UNLOAD
877 /* The module refcount will be zero if scsi_device_get() 886 /* The module refcount will be zero if scsi_device_get()
878 * was called from a module removal routine */ 887 * was called from a module removal routine */
879 if (module && module_refcount(module) != 0) 888 if (module && module_refcount(module) != 0)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index aff1b0cfd4b2..2ecb6ff42444 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -453,9 +453,18 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
453} 453}
454 454
455/** 455/**
456 * scsi_send_eh_cmnd - send a cmd to a device as part of error recovery. 456 * scsi_send_eh_cmnd - submit a scsi command as part of error recovery
457 * @scmd: SCSI Cmd to send. 457 * @scmd: SCSI command structure to hijack
458 * @timeout: Timeout for cmd. 458 * @cmnd: CDB to send
459 * @cmnd_size: size in bytes of @cmnd
460 * @timeout: timeout for this request
461 * @copy_sense: request sense data if set to 1
462 *
463 * This function is used to send a scsi command down to a target device
464 * as part of the error recovery process. If @copy_sense is 0 the command
465 * sent must be one that does not transfer any data. If @copy_sense is 1
466 * the command must be REQUEST_SENSE and this function copies out the
467 * sense buffer it got into @scmd->sense_buffer.
459 * 468 *
460 * Return value: 469 * Return value:
461 * SUCCESS or FAILED or NEEDS_RETRY 470 * SUCCESS or FAILED or NEEDS_RETRY
@@ -469,6 +478,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
469 DECLARE_COMPLETION_ONSTACK(done); 478 DECLARE_COMPLETION_ONSTACK(done);
470 unsigned long timeleft; 479 unsigned long timeleft;
471 unsigned long flags; 480 unsigned long flags;
481 struct scatterlist sgl;
472 unsigned char old_cmnd[MAX_COMMAND_SIZE]; 482 unsigned char old_cmnd[MAX_COMMAND_SIZE];
473 enum dma_data_direction old_data_direction; 483 enum dma_data_direction old_data_direction;
474 unsigned short old_use_sg; 484 unsigned short old_use_sg;
@@ -500,19 +510,24 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
500 if (shost->hostt->unchecked_isa_dma) 510 if (shost->hostt->unchecked_isa_dma)
501 gfp_mask |= __GFP_DMA; 511 gfp_mask |= __GFP_DMA;
502 512
503 scmd->sc_data_direction = DMA_FROM_DEVICE; 513 sgl.page = alloc_page(gfp_mask);
504 scmd->request_bufflen = 252; 514 if (!sgl.page)
505 scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
506 if (!scmd->request_buffer)
507 return FAILED; 515 return FAILED;
516 sgl.offset = 0;
517 sgl.length = 252;
518
519 scmd->sc_data_direction = DMA_FROM_DEVICE;
520 scmd->request_bufflen = sgl.length;
521 scmd->request_buffer = &sgl;
522 scmd->use_sg = 1;
508 } else { 523 } else {
509 scmd->request_buffer = NULL; 524 scmd->request_buffer = NULL;
510 scmd->request_bufflen = 0; 525 scmd->request_bufflen = 0;
511 scmd->sc_data_direction = DMA_NONE; 526 scmd->sc_data_direction = DMA_NONE;
527 scmd->use_sg = 0;
512 } 528 }
513 529
514 scmd->underflow = 0; 530 scmd->underflow = 0;
515 scmd->use_sg = 0;
516 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 531 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
517 532
518 if (sdev->scsi_level <= SCSI_2) 533 if (sdev->scsi_level <= SCSI_2)
@@ -583,7 +598,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
583 memcpy(scmd->sense_buffer, scmd->request_buffer, 598 memcpy(scmd->sense_buffer, scmd->request_buffer,
584 sizeof(scmd->sense_buffer)); 599 sizeof(scmd->sense_buffer));
585 } 600 }
586 kfree(scmd->request_buffer); 601 __free_page(sgl.page);
587 } 602 }
588 603
589 604
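The request-sense path above now hands the EH command a single-entry scatterlist backed by a freshly allocated page instead of a kzalloc'd buffer. The pattern, reduced to its essentials in the pre-sg_init_one style used here (a sketch, not driver code):

	struct scatterlist sgl;
	struct page *pg = alloc_page(GFP_KERNEL);

	if (pg) {
		sgl.page   = pg;	/* sense data lands in this page        */
		sgl.offset = 0;
		sgl.length = 252;	/* maximum sense size requested by EH   */
		/* ... attach &sgl as request_buffer with use_sg = 1, run the
		 *     command, copy the result into scmd->sense_buffer ...   */
		__free_page(pg);	/* freed once the data has been copied  */
	}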
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3ac4890ce086..fb616c69151f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -704,7 +704,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
704 return NULL; 704 return NULL;
705} 705}
706 706
707static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask) 707struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
708{ 708{
709 struct scsi_host_sg_pool *sgp; 709 struct scsi_host_sg_pool *sgp;
710 struct scatterlist *sgl; 710 struct scatterlist *sgl;
@@ -745,7 +745,9 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_m
745 return sgl; 745 return sgl;
746} 746}
747 747
748static void scsi_free_sgtable(struct scatterlist *sgl, int index) 748EXPORT_SYMBOL(scsi_alloc_sgtable);
749
750void scsi_free_sgtable(struct scatterlist *sgl, int index)
749{ 751{
750 struct scsi_host_sg_pool *sgp; 752 struct scsi_host_sg_pool *sgp;
751 753
@@ -755,6 +757,8 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
755 mempool_free(sgl, sgp->pool); 757 mempool_free(sgl, sgp->pool);
756} 758}
757 759
760EXPORT_SYMBOL(scsi_free_sgtable);
761
758/* 762/*
759 * Function: scsi_release_buffers() 763 * Function: scsi_release_buffers()
760 * 764 *
@@ -996,25 +1000,14 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
996 int count; 1000 int count;
997 1001
998 /* 1002 /*
999 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer 1003 * We used to not use scatter-gather for single segment request,
1000 */
1001 if (blk_pc_request(req) && !req->bio) {
1002 cmd->request_bufflen = req->data_len;
1003 cmd->request_buffer = req->data;
1004 req->buffer = req->data;
1005 cmd->use_sg = 0;
1006 return 0;
1007 }
1008
1009 /*
1010 * we used to not use scatter-gather for single segment request,
1011 * but now we do (it makes highmem I/O easier to support without 1004 * but now we do (it makes highmem I/O easier to support without
1012 * kmapping pages) 1005 * kmapping pages)
1013 */ 1006 */
1014 cmd->use_sg = req->nr_phys_segments; 1007 cmd->use_sg = req->nr_phys_segments;
1015 1008
1016 /* 1009 /*
1017 * if sg table allocation fails, requeue request later. 1010 * If sg table allocation fails, requeue request later.
1018 */ 1011 */
1019 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC); 1012 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
1020 if (unlikely(!sgpnt)) { 1013 if (unlikely(!sgpnt)) {
@@ -1022,24 +1015,21 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
1022 return BLKPREP_DEFER; 1015 return BLKPREP_DEFER;
1023 } 1016 }
1024 1017
1018 req->buffer = NULL;
1025 cmd->request_buffer = (char *) sgpnt; 1019 cmd->request_buffer = (char *) sgpnt;
1026 cmd->request_bufflen = req->nr_sectors << 9;
1027 if (blk_pc_request(req)) 1020 if (blk_pc_request(req))
1028 cmd->request_bufflen = req->data_len; 1021 cmd->request_bufflen = req->data_len;
1029 req->buffer = NULL; 1022 else
1023 cmd->request_bufflen = req->nr_sectors << 9;
1030 1024
1031 /* 1025 /*
1032 * Next, walk the list, and fill in the addresses and sizes of 1026 * Next, walk the list, and fill in the addresses and sizes of
1033 * each segment. 1027 * each segment.
1034 */ 1028 */
1035 count = blk_rq_map_sg(req->q, req, cmd->request_buffer); 1029 count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
1036
1037 /*
1038 * mapped well, send it off
1039 */
1040 if (likely(count <= cmd->use_sg)) { 1030 if (likely(count <= cmd->use_sg)) {
1041 cmd->use_sg = count; 1031 cmd->use_sg = count;
1042 return 0; 1032 return BLKPREP_OK;
1043 } 1033 }
1044 1034
1045 printk(KERN_ERR "Incorrect number of segments after building list\n"); 1035 printk(KERN_ERR "Incorrect number of segments after building list\n");
@@ -1069,6 +1059,27 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1069 return -EOPNOTSUPP; 1059 return -EOPNOTSUPP;
1070} 1060}
1071 1061
1062static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1063 struct request *req)
1064{
1065 struct scsi_cmnd *cmd;
1066
1067 if (!req->special) {
1068 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1069 if (unlikely(!cmd))
1070 return NULL;
1071 req->special = cmd;
1072 } else {
1073 cmd = req->special;
1074 }
1075
1076 /* pull a tag out of the request if we have one */
1077 cmd->tag = req->tag;
1078 cmd->request = req;
1079
1080 return cmd;
1081}
1082
1072static void scsi_blk_pc_done(struct scsi_cmnd *cmd) 1083static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1073{ 1084{
1074 BUG_ON(!blk_pc_request(cmd->request)); 1085 BUG_ON(!blk_pc_request(cmd->request));
@@ -1081,9 +1092,37 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1081 scsi_io_completion(cmd, cmd->request_bufflen); 1092 scsi_io_completion(cmd, cmd->request_bufflen);
1082} 1093}
1083 1094
1084static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) 1095static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1085{ 1096{
1086 struct request *req = cmd->request; 1097 struct scsi_cmnd *cmd;
1098
1099 cmd = scsi_get_cmd_from_req(sdev, req);
1100 if (unlikely(!cmd))
1101 return BLKPREP_DEFER;
1102
1103 /*
1104 * BLOCK_PC requests may transfer data, in which case they must
1105 * a bio attached to them. Or they might contain a SCSI command
1106 * that does not transfer data, in which case they may optionally
1107 * submit a request without an attached bio.
1108 */
1109 if (req->bio) {
1110 int ret;
1111
1112 BUG_ON(!req->nr_phys_segments);
1113
1114 ret = scsi_init_io(cmd);
1115 if (unlikely(ret))
1116 return ret;
1117 } else {
1118 BUG_ON(req->data_len);
1119 BUG_ON(req->data);
1120
1121 cmd->request_bufflen = 0;
1122 cmd->request_buffer = NULL;
1123 cmd->use_sg = 0;
1124 req->buffer = NULL;
1125 }
1087 1126
1088 BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd)); 1127 BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
1089 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); 1128 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
@@ -1099,154 +1138,138 @@ static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
1099 cmd->allowed = req->retries; 1138 cmd->allowed = req->retries;
1100 cmd->timeout_per_command = req->timeout; 1139 cmd->timeout_per_command = req->timeout;
1101 cmd->done = scsi_blk_pc_done; 1140 cmd->done = scsi_blk_pc_done;
1141 return BLKPREP_OK;
1102} 1142}
1103 1143
1104static int scsi_prep_fn(struct request_queue *q, struct request *req) 1144/*
1145 * Setup a REQ_TYPE_FS command. These are simple read/write request
1146 * from filesystems that still need to be translated to SCSI CDBs from
1147 * the ULD.
1148 */
1149static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1105{ 1150{
1106 struct scsi_device *sdev = q->queuedata;
1107 struct scsi_cmnd *cmd; 1151 struct scsi_cmnd *cmd;
1108 int specials_only = 0; 1152 struct scsi_driver *drv;
1153 int ret;
1109 1154
1110 /* 1155 /*
1111 * Just check to see if the device is online. If it isn't, we 1156 * Filesystem requests must transfer data.
1112 * refuse to process any commands. The device must be brought
1113 * online before trying any recovery commands
1114 */ 1157 */
1115 if (unlikely(!scsi_device_online(sdev))) { 1158 BUG_ON(!req->nr_phys_segments);
1116 sdev_printk(KERN_ERR, sdev, 1159
1117 "rejecting I/O to offline device\n"); 1160 cmd = scsi_get_cmd_from_req(sdev, req);
1118 goto kill; 1161 if (unlikely(!cmd))
1119 } 1162 return BLKPREP_DEFER;
1120 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { 1163
1121 /* OK, we're not in a running state don't prep 1164 ret = scsi_init_io(cmd);
1122 * user commands */ 1165 if (unlikely(ret))
1123 if (sdev->sdev_state == SDEV_DEL) { 1166 return ret;
1124 /* Device is fully deleted, no commands 1167
1125 * at all allowed down */ 1168 /*
1126 sdev_printk(KERN_ERR, sdev, 1169 * Initialize the actual SCSI command for this request.
1127 "rejecting I/O to dead device\n"); 1170 */
1128 goto kill; 1171 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1129 } 1172 if (unlikely(!drv->init_command(cmd))) {
1130 /* OK, we only allow special commands (i.e. not 1173 scsi_release_buffers(cmd);
1131 * user initiated ones */ 1174 scsi_put_command(cmd);
1132 specials_only = sdev->sdev_state; 1175 return BLKPREP_KILL;
1133 } 1176 }
1134 1177
1178 return BLKPREP_OK;
1179}
1180
1181static int scsi_prep_fn(struct request_queue *q, struct request *req)
1182{
1183 struct scsi_device *sdev = q->queuedata;
1184 int ret = BLKPREP_OK;
1185
1135 /* 1186 /*
1136 * Find the actual device driver associated with this command. 1187 * If the device is not in running state we will reject some
1137 * The SPECIAL requests are things like character device or 1188 * or all commands.
1138 * ioctls, which did not originate from ll_rw_blk. Note that
1139 * the special field is also used to indicate the cmd for
1140 * the remainder of a partially fulfilled request that can
1141 * come up when there is a medium error. We have to treat
1142 * these two cases differently. We differentiate by looking
1143 * at request->cmd, as this tells us the real story.
1144 */ 1189 */
1145 if (blk_special_request(req) && req->special) 1190 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1146 cmd = req->special; 1191 switch (sdev->sdev_state) {
1147 else if (blk_pc_request(req) || blk_fs_request(req)) { 1192 case SDEV_OFFLINE:
1148 if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){ 1193 /*
1149 if (specials_only == SDEV_QUIESCE || 1194 * If the device is offline we refuse to process any
1150 specials_only == SDEV_BLOCK) 1195 * commands. The device must be brought online
1151 goto defer; 1196 * before trying any recovery commands.
1152 1197 */
1153 sdev_printk(KERN_ERR, sdev, 1198 sdev_printk(KERN_ERR, sdev,
1154 "rejecting I/O to device being removed\n"); 1199 "rejecting I/O to offline device\n");
1155 goto kill; 1200 ret = BLKPREP_KILL;
1201 break;
1202 case SDEV_DEL:
1203 /*
1204 * If the device is fully deleted, we refuse to
1205 * process any commands as well.
1206 */
1207 sdev_printk(KERN_ERR, sdev,
1208 "rejecting I/O to dead device\n");
1209 ret = BLKPREP_KILL;
1210 break;
1211 case SDEV_QUIESCE:
1212 case SDEV_BLOCK:
1213 /*
1214 * If the devices is blocked we defer normal commands.
1215 */
1216 if (!(req->cmd_flags & REQ_PREEMPT))
1217 ret = BLKPREP_DEFER;
1218 break;
1219 default:
1220 /*
1221 * For any other not fully online state we only allow
1222 * special commands. In particular any user initiated
1223 * command is not allowed.
1224 */
1225 if (!(req->cmd_flags & REQ_PREEMPT))
1226 ret = BLKPREP_KILL;
1227 break;
1156 } 1228 }
1157 1229
1158 /* 1230 if (ret != BLKPREP_OK)
1159 * Now try and find a command block that we can use. 1231 goto out;
1160 */
1161 if (!req->special) {
1162 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1163 if (unlikely(!cmd))
1164 goto defer;
1165 } else
1166 cmd = req->special;
1167
1168 /* pull a tag out of the request if we have one */
1169 cmd->tag = req->tag;
1170 } else {
1171 blk_dump_rq_flags(req, "SCSI bad req");
1172 goto kill;
1173 } 1232 }
1174
1175 /* note the overloading of req->special. When the tag
1176 * is active it always means cmd. If the tag goes
1177 * back for re-queueing, it may be reset */
1178 req->special = cmd;
1179 cmd->request = req;
1180
1181 /*
1182 * FIXME: drop the lock here because the functions below
1183 * expect to be called without the queue lock held. Also,
1184 * previously, we dequeued the request before dropping the
1185 * lock. We hope REQ_STARTED prevents anything untoward from
1186 * happening now.
1187 */
1188 if (blk_fs_request(req) || blk_pc_request(req)) {
1189 int ret;
1190 1233
1234 switch (req->cmd_type) {
1235 case REQ_TYPE_BLOCK_PC:
1236 ret = scsi_setup_blk_pc_cmnd(sdev, req);
1237 break;
1238 case REQ_TYPE_FS:
1239 ret = scsi_setup_fs_cmnd(sdev, req);
1240 break;
1241 default:
1191 /* 1242 /*
1192 * This will do a couple of things: 1243 * All other command types are not supported.
1193 * 1) Fill in the actual SCSI command.
1194 * 2) Fill in any other upper-level specific fields
1195 * (timeout).
1196 * 1244 *
1197 * If this returns 0, it means that the request failed 1245 * Note that these days the SCSI subsystem does not use
1198 * (reading past end of disk, reading offline device, 1246 * REQ_TYPE_SPECIAL requests anymore. These are only used
1199 * etc). This won't actually talk to the device, but 1247 * (directly or via blk_insert_request) by non-SCSI drivers.
1200 * some kinds of consistency checking may cause the
1201 * request to be rejected immediately.
1202 */ 1248 */
1249 blk_dump_rq_flags(req, "SCSI bad req");
1250 ret = BLKPREP_KILL;
1251 break;
1252 }
1203 1253
1204 /* 1254 out:
1205 * This sets up the scatter-gather table (allocating if 1255 switch (ret) {
1206 * required). 1256 case BLKPREP_KILL:
1207 */ 1257 req->errors = DID_NO_CONNECT << 16;
1208 ret = scsi_init_io(cmd); 1258 break;
1209 switch(ret) { 1259 case BLKPREP_DEFER:
1210 /* For BLKPREP_KILL/DEFER the cmd was released */
1211 case BLKPREP_KILL:
1212 goto kill;
1213 case BLKPREP_DEFER:
1214 goto defer;
1215 }
1216
1217 /* 1260 /*
1218 * Initialize the actual SCSI command for this request. 1261 * If we defer, the elv_next_request() returns NULL, but the
1262 * queue must be restarted, so we plug here if no returning
1263 * command will automatically do that.
1219 */ 1264 */
1220 if (blk_pc_request(req)) { 1265 if (sdev->device_busy == 0)
1221 scsi_setup_blk_pc_cmnd(cmd); 1266 blk_plug_device(q);
1222 } else if (req->rq_disk) { 1267 break;
1223 struct scsi_driver *drv; 1268 default:
1224 1269 req->cmd_flags |= REQ_DONTPREP;
1225 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1226 if (unlikely(!drv->init_command(cmd))) {
1227 scsi_release_buffers(cmd);
1228 scsi_put_command(cmd);
1229 goto kill;
1230 }
1231 }
1232 } 1270 }
1233 1271
1234 /* 1272 return ret;
1235 * The request is now prepped, no need to come back here
1236 */
1237 req->cmd_flags |= REQ_DONTPREP;
1238 return BLKPREP_OK;
1239
1240 defer:
1241 /* If we defer, the elv_next_request() returns NULL, but the
1242 * queue must be restarted, so we plug here if no returning
1243 * command will automatically do that. */
1244 if (sdev->device_busy == 0)
1245 blk_plug_device(q);
1246 return BLKPREP_DEFER;
1247 kill:
1248 req->errors = DID_NO_CONNECT << 16;
1249 return BLKPREP_KILL;
1250} 1273}
1251 1274
1252/* 1275/*
@@ -1548,29 +1571,40 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1548} 1571}
1549EXPORT_SYMBOL(scsi_calculate_bounce_limit); 1572EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1550 1573
1551struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 1574struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1575 request_fn_proc *request_fn)
1552{ 1576{
1553 struct Scsi_Host *shost = sdev->host;
1554 struct request_queue *q; 1577 struct request_queue *q;
1555 1578
1556 q = blk_init_queue(scsi_request_fn, NULL); 1579 q = blk_init_queue(request_fn, NULL);
1557 if (!q) 1580 if (!q)
1558 return NULL; 1581 return NULL;
1559 1582
1560 blk_queue_prep_rq(q, scsi_prep_fn);
1561
1562 blk_queue_max_hw_segments(q, shost->sg_tablesize); 1583 blk_queue_max_hw_segments(q, shost->sg_tablesize);
1563 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS); 1584 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1564 blk_queue_max_sectors(q, shost->max_sectors); 1585 blk_queue_max_sectors(q, shost->max_sectors);
1565 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1586 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1566 blk_queue_segment_boundary(q, shost->dma_boundary); 1587 blk_queue_segment_boundary(q, shost->dma_boundary);
1567 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1568 blk_queue_softirq_done(q, scsi_softirq_done);
1569 1588
1570 if (!shost->use_clustering) 1589 if (!shost->use_clustering)
1571 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 1590 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1572 return q; 1591 return q;
1573} 1592}
1593EXPORT_SYMBOL(__scsi_alloc_queue);
1594
1595struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1596{
1597 struct request_queue *q;
1598
1599 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1600 if (!q)
1601 return NULL;
1602
1603 blk_queue_prep_rq(q, scsi_prep_fn);
1604 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1605 blk_queue_softirq_done(q, scsi_softirq_done);
1606 return q;
1607}
1574 1608
1575void scsi_free_queue(struct request_queue *q) 1609void scsi_free_queue(struct request_queue *q)
1576{ 1610{
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5d023d44e5e7..f458c2f686d2 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -39,6 +39,9 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
39 { }; 39 { };
40#endif 40#endif
41 41
42/* scsi_scan.c */
43int scsi_complete_async_scans(void);
44
42/* scsi_devinfo.c */ 45/* scsi_devinfo.c */
43extern int scsi_get_device_flags(struct scsi_device *sdev, 46extern int scsi_get_device_flags(struct scsi_device *sdev,
44 const unsigned char *vendor, 47 const unsigned char *vendor,
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 94a274645f6f..14e635aa44ce 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -29,7 +29,9 @@
29#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/blkdev.h> 31#include <linux/blkdev.h>
32#include <asm/semaphore.h> 32#include <linux/delay.h>
33#include <linux/kthread.h>
34#include <linux/spinlock.h>
33 35
34#include <scsi/scsi.h> 36#include <scsi/scsi.h>
35#include <scsi/scsi_cmnd.h> 37#include <scsi/scsi_cmnd.h>
@@ -87,6 +89,17 @@ module_param_named(max_luns, max_scsi_luns, int, S_IRUGO|S_IWUSR);
87MODULE_PARM_DESC(max_luns, 89MODULE_PARM_DESC(max_luns,
88 "last scsi LUN (should be between 1 and 2^32-1)"); 90 "last scsi LUN (should be between 1 and 2^32-1)");
89 91
92#ifdef CONFIG_SCSI_SCAN_ASYNC
93#define SCSI_SCAN_TYPE_DEFAULT "async"
94#else
95#define SCSI_SCAN_TYPE_DEFAULT "sync"
96#endif
97
98static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
99
100module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
101MODULE_PARM_DESC(scan, "sync, async or none");
102
90/* 103/*
91 * max_scsi_report_luns: the maximum number of LUNS that will be 104 * max_scsi_report_luns: the maximum number of LUNS that will be
92 * returned from the REPORT LUNS command. 8 times this value must 105 * returned from the REPORT LUNS command. 8 times this value must
@@ -108,6 +121,68 @@ MODULE_PARM_DESC(inq_timeout,
108 "Timeout (in seconds) waiting for devices to answer INQUIRY." 121 "Timeout (in seconds) waiting for devices to answer INQUIRY."
109 " Default is 5. Some non-compliant devices need more."); 122 " Default is 5. Some non-compliant devices need more.");
110 123
124static DEFINE_SPINLOCK(async_scan_lock);
125static LIST_HEAD(scanning_hosts);
126
127struct async_scan_data {
128 struct list_head list;
129 struct Scsi_Host *shost;
130 struct completion prev_finished;
131};
132
133/**
134 * scsi_complete_async_scans - Wait for asynchronous scans to complete
135 *
136 * Asynchronous scans add themselves to the scanning_hosts list. Once
137 * that list is empty, we know that the scans are complete. Rather than
138 * waking up periodically to check the state of the list, we pretend to be
139 * a scanning task by adding ourselves at the end of the list and going to
140 * sleep. When the task before us wakes us up, we take ourselves off the
141 * list and return.
142 */
143int scsi_complete_async_scans(void)
144{
145 struct async_scan_data *data;
146
147 do {
148 if (list_empty(&scanning_hosts))
149 return 0;
150 /* If we can't get memory immediately, that's OK. Just
151 * sleep a little. Even if we never get memory, the async
152 * scans will finish eventually.
153 */
154 data = kmalloc(sizeof(*data), GFP_KERNEL);
155 if (!data)
156 msleep(1);
157 } while (!data);
158
159 data->shost = NULL;
160 init_completion(&data->prev_finished);
161
162 spin_lock(&async_scan_lock);
163 /* Check that there's still somebody else on the list */
164 if (list_empty(&scanning_hosts))
165 goto done;
166 list_add_tail(&data->list, &scanning_hosts);
167 spin_unlock(&async_scan_lock);
168
169 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
170 wait_for_completion(&data->prev_finished);
171
172 spin_lock(&async_scan_lock);
173 list_del(&data->list);
174 done:
175 spin_unlock(&async_scan_lock);
176
177 kfree(data);
178 return 0;
179}
180
181#ifdef MODULE
182/* Only exported for the benefit of scsi_wait_scan */
183EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
184#endif
185
111/** 186/**
112 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 187 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
113 * @sdev: scsi device to send command to 188 * @sdev: scsi device to send command to
@@ -362,9 +437,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
362 goto retry; 437 goto retry;
363} 438}
364 439
365static void scsi_target_reap_usercontext(void *data) 440static void scsi_target_reap_usercontext(struct work_struct *work)
366{ 441{
367 struct scsi_target *starget = data; 442 struct scsi_target *starget =
443 container_of(work, struct scsi_target, ew.work);
368 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 444 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
369 unsigned long flags; 445 unsigned long flags;
370 446
@@ -400,7 +476,7 @@ void scsi_target_reap(struct scsi_target *starget)
400 starget->state = STARGET_DEL; 476 starget->state = STARGET_DEL;
401 spin_unlock_irqrestore(shost->host_lock, flags); 477 spin_unlock_irqrestore(shost->host_lock, flags);
402 execute_in_process_context(scsi_target_reap_usercontext, 478 execute_in_process_context(scsi_target_reap_usercontext,
403 starget, &starget->ew); 479 &starget->ew);
404 return; 480 return;
405 481
406 } 482 }
@@ -619,7 +695,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
619 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 695 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
620 **/ 696 **/
621static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 697static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
622 int *bflags) 698 int *bflags, int async)
623{ 699{
624 /* 700 /*
625 * XXX do not save the inquiry, since it can change underneath us, 701 * XXX do not save the inquiry, since it can change underneath us,
@@ -805,7 +881,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
805 * register it and tell the rest of the kernel 881 * register it and tell the rest of the kernel
806 * about it. 882 * about it.
807 */ 883 */
808 if (scsi_sysfs_add_sdev(sdev) != 0) 884 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
809 return SCSI_SCAN_NO_RESPONSE; 885 return SCSI_SCAN_NO_RESPONSE;
810 886
811 return SCSI_SCAN_LUN_PRESENT; 887 return SCSI_SCAN_LUN_PRESENT;
@@ -974,7 +1050,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
974 goto out_free_result; 1050 goto out_free_result;
975 } 1051 }
976 1052
977 res = scsi_add_lun(sdev, result, &bflags); 1053 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
978 if (res == SCSI_SCAN_LUN_PRESENT) { 1054 if (res == SCSI_SCAN_LUN_PRESENT) {
979 if (bflags & BLIST_KEY) { 1055 if (bflags & BLIST_KEY) {
980 sdev->lockable = 0; 1056 sdev->lockable = 0;
@@ -1474,6 +1550,12 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
1474{ 1550{
1475 struct Scsi_Host *shost = dev_to_shost(parent); 1551 struct Scsi_Host *shost = dev_to_shost(parent);
1476 1552
1553 if (strncmp(scsi_scan_type, "none", 4) == 0)
1554 return;
1555
1556 if (!shost->async_scan)
1557 scsi_complete_async_scans();
1558
1477 mutex_lock(&shost->scan_mutex); 1559 mutex_lock(&shost->scan_mutex);
1478 if (scsi_host_scan_allowed(shost)) 1560 if (scsi_host_scan_allowed(shost))
1479 __scsi_scan_target(parent, channel, id, lun, rescan); 1561 __scsi_scan_target(parent, channel, id, lun, rescan);
@@ -1519,6 +1601,9 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1519 "%s: <%u:%u:%u>\n", 1601 "%s: <%u:%u:%u>\n",
1520 __FUNCTION__, channel, id, lun)); 1602 __FUNCTION__, channel, id, lun));
1521 1603
1604 if (!shost->async_scan)
1605 scsi_complete_async_scans();
1606
1522 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1607 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1523 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 1608 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1524 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun))) 1609 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
@@ -1539,14 +1624,143 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1539 return 0; 1624 return 0;
1540} 1625}
1541 1626
1627static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1628{
1629 struct scsi_device *sdev;
1630 shost_for_each_device(sdev, shost) {
1631 if (scsi_sysfs_add_sdev(sdev) != 0)
1632 scsi_destroy_sdev(sdev);
1633 }
1634}
1635
1636/**
1637 * scsi_prep_async_scan - prepare for an async scan
1638 * @shost: the host which will be scanned
1639 * Returns: a cookie to be passed to scsi_finish_async_scan()
1640 *
1641 * Tells the midlayer this host is going to do an asynchronous scan.
1642 * It reserves the host's position in the scanning list and ensures
1643 * that other asynchronous scans started after this one won't affect the
1644 * ordering of the discovered devices.
1645 */
1646static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1647{
1648 struct async_scan_data *data;
1649
1650 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1651 return NULL;
1652
1653 if (shost->async_scan) {
1654 printk("%s called twice for host %d", __FUNCTION__,
1655 shost->host_no);
1656 dump_stack();
1657 return NULL;
1658 }
1659
1660 data = kmalloc(sizeof(*data), GFP_KERNEL);
1661 if (!data)
1662 goto err;
1663 data->shost = scsi_host_get(shost);
1664 if (!data->shost)
1665 goto err;
1666 init_completion(&data->prev_finished);
1667
1668 spin_lock(&async_scan_lock);
1669 shost->async_scan = 1;
1670 if (list_empty(&scanning_hosts))
1671 complete(&data->prev_finished);
1672 list_add_tail(&data->list, &scanning_hosts);
1673 spin_unlock(&async_scan_lock);
1674
1675 return data;
1676
1677 err:
1678 kfree(data);
1679 return NULL;
1680}
1681
1682/**
1683 * scsi_finish_async_scan - asynchronous scan has finished
1684 * @data: cookie returned from earlier call to scsi_prep_async_scan()
1685 *
1686 * All the devices currently attached to this host have been found.
1687 * This function announces all the devices it has found to the rest
1688 * of the system.
1689 */
1690static void scsi_finish_async_scan(struct async_scan_data *data)
1691{
1692 struct Scsi_Host *shost;
1693
1694 if (!data)
1695 return;
1696
1697 shost = data->shost;
1698 if (!shost->async_scan) {
1699 printk("%s called twice for host %d", __FUNCTION__,
1700 shost->host_no);
1701 dump_stack();
1702 return;
1703 }
1704
1705 wait_for_completion(&data->prev_finished);
1706
1707 scsi_sysfs_add_devices(shost);
1708
1709 spin_lock(&async_scan_lock);
1710 shost->async_scan = 0;
1711 list_del(&data->list);
1712 if (!list_empty(&scanning_hosts)) {
1713 struct async_scan_data *next = list_entry(scanning_hosts.next,
1714 struct async_scan_data, list);
1715 complete(&next->prev_finished);
1716 }
1717 spin_unlock(&async_scan_lock);
1718
1719 scsi_host_put(shost);
1720 kfree(data);
1721}
1722
1723static void do_scsi_scan_host(struct Scsi_Host *shost)
1724{
1725 if (shost->hostt->scan_finished) {
1726 unsigned long start = jiffies;
1727 if (shost->hostt->scan_start)
1728 shost->hostt->scan_start(shost);
1729
1730 while (!shost->hostt->scan_finished(shost, jiffies - start))
1731 msleep(10);
1732 } else {
1733 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
1734 SCAN_WILD_CARD, 0);
1735 }
1736}
1737
1738static int do_scan_async(void *_data)
1739{
1740 struct async_scan_data *data = _data;
1741 do_scsi_scan_host(data->shost);
1742 scsi_finish_async_scan(data);
1743 return 0;
1744}
1745
1542/** 1746/**
1543 * scsi_scan_host - scan the given adapter 1747 * scsi_scan_host - scan the given adapter
1544 * @shost: adapter to scan 1748 * @shost: adapter to scan
1545 **/ 1749 **/
1546void scsi_scan_host(struct Scsi_Host *shost) 1750void scsi_scan_host(struct Scsi_Host *shost)
1547{ 1751{
1548 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, 1752 struct async_scan_data *data;
1549 SCAN_WILD_CARD, 0); 1753
1754 if (strncmp(scsi_scan_type, "none", 4) == 0)
1755 return;
1756
1757 data = scsi_prep_async_scan(shost);
1758 if (!data) {
1759 do_scsi_scan_host(shost);
1760 return;
1761 }
1762
1763 kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
1550} 1764}
1551EXPORT_SYMBOL(scsi_scan_host); 1765EXPORT_SYMBOL(scsi_scan_host);
1552 1766
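Two points are worth noting about the asynchronous scanning added above. The scan mode is selectable at load time (for instance scsi_mod.scan=async on the kernel command line, or modprobe scsi_mod scan=sync for a modular build). And do_scsi_scan_host() prefers host-driven discovery when the template provides it; a driver would opt in roughly like this (a hedged sketch, not from this patch, with my_discovery_done() standing in for the driver's own state check):

static void my_scan_start(struct Scsi_Host *shost)
{
	/* kick off discovery in the background, e.g. issue a fabric login */
}

static int my_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
{
	/* non-zero once discovery is complete (or a timeout has expired) */
	return my_discovery_done(shost) || elapsed > 10 * HZ;
}

static struct scsi_host_template my_template = {
	.scan_start	= my_scan_start,
	.scan_finished	= my_scan_finished,
	/* ... */
};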
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e1a91665d1c2..259c90cfa367 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -218,16 +218,16 @@ static void scsi_device_cls_release(struct class_device *class_dev)
218 put_device(&sdev->sdev_gendev); 218 put_device(&sdev->sdev_gendev);
219} 219}
220 220
221static void scsi_device_dev_release_usercontext(void *data) 221static void scsi_device_dev_release_usercontext(struct work_struct *work)
222{ 222{
223 struct device *dev = data;
224 struct scsi_device *sdev; 223 struct scsi_device *sdev;
225 struct device *parent; 224 struct device *parent;
226 struct scsi_target *starget; 225 struct scsi_target *starget;
227 unsigned long flags; 226 unsigned long flags;
228 227
229 parent = dev->parent; 228 sdev = container_of(work, struct scsi_device, ew.work);
230 sdev = to_scsi_device(dev); 229
230 parent = sdev->sdev_gendev.parent;
231 starget = to_scsi_target(parent); 231 starget = to_scsi_target(parent);
232 232
233 spin_lock_irqsave(sdev->host->host_lock, flags); 233 spin_lock_irqsave(sdev->host->host_lock, flags);
@@ -258,7 +258,7 @@ static void scsi_device_dev_release_usercontext(void *data)
258static void scsi_device_dev_release(struct device *dev) 258static void scsi_device_dev_release(struct device *dev)
259{ 259{
260 struct scsi_device *sdp = to_scsi_device(dev); 260 struct scsi_device *sdp = to_scsi_device(dev);
261 execute_in_process_context(scsi_device_dev_release_usercontext, dev, 261 execute_in_process_context(scsi_device_dev_release_usercontext,
262 &sdp->ew); 262 &sdp->ew);
263} 263}
264 264
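The conversions above (qla4xxx_do_dpc, scsi_target_reap_usercontext, scsi_device_dev_release_usercontext) all follow the 2.6.20 workqueue API change: the callback now receives the work_struct itself and recovers its context with container_of(), and INIT_WORK() no longer takes a data pointer. The general shape, as a sketch:

struct my_ctx {
	struct work_struct work;
	int value;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);
	/* ... use ctx->value ... */
}

/* setup:   INIT_WORK(&ctx->work, my_work_fn);
 * old API: INIT_WORK(&ctx->work, my_work_fn, ctx);  -- data pointer removed */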
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
new file mode 100644
index 000000000000..37bbfbdb870f
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -0,0 +1,352 @@
1/*
2 * SCSI target kernel/user interface functions
3 *
4 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
5 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22#include <linux/miscdevice.h>
23#include <linux/file.h>
24#include <net/tcp.h>
25#include <scsi/scsi.h>
26#include <scsi/scsi_cmnd.h>
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_tgt.h>
30#include <scsi/scsi_tgt_if.h>
31
32#include <asm/cacheflush.h>
33
34#include "scsi_tgt_priv.h"
35
36struct tgt_ring {
37 u32 tr_idx;
38 unsigned long tr_pages[TGT_RING_PAGES];
39 spinlock_t tr_lock;
40};
41
42/* tx_ring : kernel->user, rx_ring : user->kernel */
43static struct tgt_ring tx_ring, rx_ring;
44static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
45
46static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
47{
48 if (ring->tr_idx == TGT_MAX_EVENTS - 1)
49 ring->tr_idx = 0;
50 else
51 ring->tr_idx++;
52}
53
54static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
55{
56 u32 pidx, off;
57
58 pidx = idx / TGT_EVENT_PER_PAGE;
59 off = idx % TGT_EVENT_PER_PAGE;
60
61 return (struct tgt_event *)
62 (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
63}
64
65static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
66{
67 struct tgt_event *ev;
68 struct tgt_ring *ring = &tx_ring;
69 unsigned long flags;
70 int err = 0;
71
72 spin_lock_irqsave(&ring->tr_lock, flags);
73
74 ev = tgt_head_event(ring, ring->tr_idx);
75 if (!ev->hdr.status)
76 tgt_ring_idx_inc(ring);
77 else
 78		err = -EBUSY;
79
80 spin_unlock_irqrestore(&ring->tr_lock, flags);
81
82 if (err)
83 return err;
84
85 memcpy(ev, p, sizeof(*ev));
86 ev->hdr.type = type;
87 mb();
88 ev->hdr.status = 1;
89
90 flush_dcache_page(virt_to_page(ev));
91
92 wake_up_interruptible(&tgt_poll_wait);
93
94 return 0;
95}
96
97int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, u64 tag)
98{
99 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
100 struct tgt_event ev;
101 int err;
102
103 memset(&ev, 0, sizeof(ev));
104 ev.p.cmd_req.host_no = shost->host_no;
105 ev.p.cmd_req.data_len = cmd->request_bufflen;
106 memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
107 memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
108 ev.p.cmd_req.attribute = cmd->tag;
109 ev.p.cmd_req.tag = tag;
110
111 dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
112 ev.p.cmd_req.data_len, cmd->tag,
113 (unsigned long long) ev.p.cmd_req.tag);
114
115 err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
116 if (err)
117 eprintk("tx buf is full, could not send\n");
118
119 return err;
120}
121
122int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag)
123{
124 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
125 struct tgt_event ev;
126 int err;
127
128 memset(&ev, 0, sizeof(ev));
129 ev.p.cmd_done.host_no = shost->host_no;
130 ev.p.cmd_done.tag = tag;
131 ev.p.cmd_done.result = cmd->result;
132
133 dprintk("%p %d %llu %u %x\n", cmd, shost->host_no,
134 (unsigned long long) ev.p.cmd_req.tag,
135 ev.p.cmd_req.data_len, cmd->tag);
136
137 err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
138 if (err)
139 eprintk("tx buf is full, could not send\n");
140
141 return err;
142}
143
144int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
145 struct scsi_lun *scsilun, void *data)
146{
147 struct tgt_event ev;
148 int err;
149
150 memset(&ev, 0, sizeof(ev));
151 ev.p.tsk_mgmt_req.host_no = host_no;
152 ev.p.tsk_mgmt_req.function = function;
153 ev.p.tsk_mgmt_req.tag = tag;
154 memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
155 ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;
156
157 dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
158 (unsigned long long) ev.p.tsk_mgmt_req.mid);
159
160 err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
161 if (err)
162 eprintk("tx buf is full, could not send\n");
163
164 return err;
165}
166
167static int event_recv_msg(struct tgt_event *ev)
168{
169 int err = 0;
170
171 switch (ev->hdr.type) {
172 case TGT_UEVENT_CMD_RSP:
173 err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
174 ev->p.cmd_rsp.tag,
175 ev->p.cmd_rsp.result,
176 ev->p.cmd_rsp.len,
177 ev->p.cmd_rsp.uaddr,
178 ev->p.cmd_rsp.rw);
179 break;
180 case TGT_UEVENT_TSK_MGMT_RSP:
181 err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
182 ev->p.tsk_mgmt_rsp.mid,
183 ev->p.tsk_mgmt_rsp.result);
184 break;
185 default:
186 eprintk("unknown type %d\n", ev->hdr.type);
187 err = -EINVAL;
188 }
189
190 return err;
191}
192
193static ssize_t tgt_write(struct file *file, const char __user * buffer,
194 size_t count, loff_t * ppos)
195{
196 struct tgt_event *ev;
197 struct tgt_ring *ring = &rx_ring;
198
199 while (1) {
200 ev = tgt_head_event(ring, ring->tr_idx);
201 /* do we need this? */
202 flush_dcache_page(virt_to_page(ev));
203
204 if (!ev->hdr.status)
205 break;
206
207 tgt_ring_idx_inc(ring);
208 event_recv_msg(ev);
209 ev->hdr.status = 0;
 210	}
211
212 return count;
213}
214
215static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
216{
217 struct tgt_event *ev;
218 struct tgt_ring *ring = &tx_ring;
219 unsigned long flags;
220 unsigned int mask = 0;
221 u32 idx;
222
223 poll_wait(file, &tgt_poll_wait, wait);
224
225 spin_lock_irqsave(&ring->tr_lock, flags);
226
227 idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
228 ev = tgt_head_event(ring, idx);
229 if (ev->hdr.status)
230 mask |= POLLIN | POLLRDNORM;
231
232 spin_unlock_irqrestore(&ring->tr_lock, flags);
233
234 return mask;
235}
236
237static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
238 struct tgt_ring *ring)
239{
240 int i, err;
241
242 for (i = 0; i < TGT_RING_PAGES; i++) {
243 struct page *page = virt_to_page(ring->tr_pages[i]);
244 err = vm_insert_page(vma, addr, page);
245 if (err)
246 return err;
247 addr += PAGE_SIZE;
248 }
249
250 return 0;
251}
252
253static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
254{
255 unsigned long addr;
256 int err;
257
258 if (vma->vm_pgoff)
259 return -EINVAL;
260
261 if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
262 eprintk("mmap size must be %lu, not %lu \n",
263 TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
264 return -EINVAL;
265 }
266
267 addr = vma->vm_start;
268 err = uspace_ring_map(vma, addr, &tx_ring);
269 if (err)
270 return err;
271 err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);
272
273 return err;
274}
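For orientation, a rough sketch of how a userspace consumer (the real one is the tgt daemon) might map and drain the tx ring that tgt_mmap() and tgt_poll() expose. struct tgt_event and its hdr fields come from scsi_tgt_if.h; RING_SIZE is a stand-in that must match the kernel's TGT_RING_SIZE, and the /dev/tgt node name assumes the usual misc-device naming:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <scsi/scsi_tgt_if.h>		/* struct tgt_event */

#define RING_SIZE	(64 * 1024)	/* stand-in: must match the kernel's TGT_RING_SIZE */

/* Mirror tgt_head_event(): events are packed per page and never straddle
 * a page boundary, so index into pages first, then into the page. */
static struct tgt_event *ring_event(char *base, unsigned int idx, long psize)
{
	unsigned int per_page = psize / sizeof(struct tgt_event);

	return (struct tgt_event *)(base + (idx / per_page) * psize +
				    (idx % per_page) * sizeof(struct tgt_event));
}

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	unsigned int max_events = (RING_SIZE / psize) *
				  (psize / sizeof(struct tgt_event));
	unsigned int idx = 0;
	char *tx;			/* kernel->user ring: first half of the mapping */
	int fd;

	fd = open("/dev/tgt", O_RDWR);
	if (fd < 0)
		return 1;

	tx = mmap(NULL, RING_SIZE * 2, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (tx == MAP_FAILED)
		return 1;

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		struct tgt_event *ev;

		poll(&pfd, 1, -1);	/* tgt_poll() reports POLLIN when an event lands */

		while ((ev = ring_event(tx, idx, psize))->hdr.status) {
			printf("event type %u\n", ev->hdr.type);
			ev->hdr.status = 0;	/* hand the slot back to the kernel */
			idx = (idx + 1) % max_events;
		}
	}
}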
275
276static int tgt_open(struct inode *inode, struct file *file)
277{
278 tx_ring.tr_idx = rx_ring.tr_idx = 0;
279
280 return 0;
281}
282
283static struct file_operations tgt_fops = {
284 .owner = THIS_MODULE,
285 .open = tgt_open,
286 .poll = tgt_poll,
287 .write = tgt_write,
288 .mmap = tgt_mmap,
289};
290
291static struct miscdevice tgt_miscdev = {
292 .minor = MISC_DYNAMIC_MINOR,
293 .name = "tgt",
294 .fops = &tgt_fops,
295};
296
297static void tgt_ring_exit(struct tgt_ring *ring)
298{
299 int i;
300
301 for (i = 0; i < TGT_RING_PAGES; i++)
302 free_page(ring->tr_pages[i]);
303}
304
305static int tgt_ring_init(struct tgt_ring *ring)
306{
307 int i;
308
309 spin_lock_init(&ring->tr_lock);
310
311 for (i = 0; i < TGT_RING_PAGES; i++) {
312 ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
313 if (!ring->tr_pages[i]) {
314 eprintk("out of memory\n");
315 return -ENOMEM;
316 }
317 }
318
319 return 0;
320}
321
322void scsi_tgt_if_exit(void)
323{
324 tgt_ring_exit(&tx_ring);
325 tgt_ring_exit(&rx_ring);
326 misc_deregister(&tgt_miscdev);
327}
328
329int scsi_tgt_if_init(void)
330{
331 int err;
332
333 err = tgt_ring_init(&tx_ring);
334 if (err)
335 return err;
336
337 err = tgt_ring_init(&rx_ring);
338 if (err)
339 goto free_tx_ring;
340
341 err = misc_register(&tgt_miscdev);
342 if (err)
343 goto free_rx_ring;
344
345 return 0;
346free_rx_ring:
347 tgt_ring_exit(&rx_ring);
348free_tx_ring:
349 tgt_ring_exit(&tx_ring);
350
351 return err;
352}
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
new file mode 100644
index 000000000000..386dbae17b44
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -0,0 +1,745 @@
1/*
2 * SCSI target lib functions
3 *
4 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
5 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22#include <linux/blkdev.h>
23#include <linux/hash.h>
24#include <linux/module.h>
25#include <linux/pagemap.h>
26#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_tgt.h>
31#include <../drivers/md/dm-bio-list.h>
32
33#include "scsi_tgt_priv.h"
34
35static struct workqueue_struct *scsi_tgtd;
36static kmem_cache_t *scsi_tgt_cmd_cache;
37
38/*
39 * TODO: this struct will be killed when the block layer supports large bios
40 * and James's work struct code is in
41 */
42struct scsi_tgt_cmd {
43 /* TODO replace work with James b's code */
44 struct work_struct work;
45 /* TODO replace the lists with a large bio */
46 struct bio_list xfer_done_list;
47 struct bio_list xfer_list;
48
49 struct list_head hash_list;
50 struct request *rq;
51 u64 tag;
52
53 void *buffer;
54 unsigned bufflen;
55};
56
57#define TGT_HASH_ORDER 4
58#define cmd_hashfn(tag) hash_long((unsigned long) (tag), TGT_HASH_ORDER)
59
60struct scsi_tgt_queuedata {
61 struct Scsi_Host *shost;
62 struct list_head cmd_hash[1 << TGT_HASH_ORDER];
63 spinlock_t cmd_hash_lock;
64};
65
66/*
67 * Function: scsi_host_get_command()
68 *
69 * Purpose: Allocate and setup a scsi command block and blk request
70 *
71 * Arguments: shost - scsi host
72 * data_dir - dma data dir
73 * gfp_mask- allocator flags
74 *
75 * Returns: The allocated scsi command structure.
76 *
77 * This should be called by target LLDs to get a command.
78 */
79struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
80 enum dma_data_direction data_dir,
81 gfp_t gfp_mask)
82{
83 int write = (data_dir == DMA_TO_DEVICE);
84 struct request *rq;
85 struct scsi_cmnd *cmd;
86 struct scsi_tgt_cmd *tcmd;
87
88 /* Bail if we can't get a reference to the device */
89 if (!get_device(&shost->shost_gendev))
90 return NULL;
91
92 tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
93 if (!tcmd)
94 goto put_dev;
95
96 rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
97 if (!rq)
98 goto free_tcmd;
99
100 cmd = __scsi_get_command(shost, gfp_mask);
101 if (!cmd)
102 goto release_rq;
103
104 memset(cmd, 0, sizeof(*cmd));
105 cmd->sc_data_direction = data_dir;
106 cmd->jiffies_at_alloc = jiffies;
107 cmd->request = rq;
108
109 rq->special = cmd;
110 rq->cmd_type = REQ_TYPE_SPECIAL;
111 rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
112 rq->end_io_data = tcmd;
113
114 bio_list_init(&tcmd->xfer_list);
115 bio_list_init(&tcmd->xfer_done_list);
116 tcmd->rq = rq;
117
118 return cmd;
119
120release_rq:
121 blk_put_request(rq);
122free_tcmd:
123 kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
124put_dev:
125 put_device(&shost->shost_gendev);
126 return NULL;
127
128}
129EXPORT_SYMBOL_GPL(scsi_host_get_command);
130
131/*
132 * Function: scsi_host_put_command()
133 *
134 * Purpose: Free a scsi command block
135 *
136 * Arguments: shost - scsi host
137 * cmd - command block to free
138 *
139 * Returns: Nothing.
140 *
141 * Notes: The command must not belong to any lists.
142 */
143void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
144{
145 struct request_queue *q = shost->uspace_req_q;
146 struct request *rq = cmd->request;
147 struct scsi_tgt_cmd *tcmd = rq->end_io_data;
148 unsigned long flags;
149
150 kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
151
152 spin_lock_irqsave(q->queue_lock, flags);
153 __blk_put_request(q, rq);
154 spin_unlock_irqrestore(q->queue_lock, flags);
155
156 __scsi_put_command(shost, cmd, &shost->shost_gendev);
157}
158EXPORT_SYMBOL_GPL(scsi_host_put_command);
159
160static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
161{
162 struct bio *bio;
163
164 /* must call bio_endio in case bio was bounced */
165 while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
166 bio_endio(bio, bio->bi_size, 0);
167 bio_unmap_user(bio);
168 }
169
170 while ((bio = bio_list_pop(&tcmd->xfer_list))) {
171 bio_endio(bio, bio->bi_size, 0);
172 bio_unmap_user(bio);
173 }
174}
175
176static void cmd_hashlist_del(struct scsi_cmnd *cmd)
177{
178 struct request_queue *q = cmd->request->q;
179 struct scsi_tgt_queuedata *qdata = q->queuedata;
180 unsigned long flags;
181 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
182
183 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
184 list_del(&tcmd->hash_list);
185 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
186}
187
188static void scsi_tgt_cmd_destroy(struct work_struct *work)
189{
190 struct scsi_tgt_cmd *tcmd =
191 container_of(work, struct scsi_tgt_cmd, work);
192 struct scsi_cmnd *cmd = tcmd->rq->special;
193
194 dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
195 rq_data_dir(cmd->request));
196 /*
 197	 * We fix rq->cmd_flags here: we told bio_map_user to write to
 198	 * the vm for WRITE commands, but blk_rq_bio_prep reset the
 199	 * flags so that rq_data_dir reports READ.
200 */
201 if (cmd->sc_data_direction == DMA_TO_DEVICE)
202 cmd->request->cmd_flags |= REQ_RW;
203 else
204 cmd->request->cmd_flags &= ~REQ_RW;
205
206 scsi_unmap_user_pages(tcmd);
207 scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
208}
209
210static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
211 u64 tag)
212{
213 struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
214 unsigned long flags;
215 struct list_head *head;
216
217 tcmd->tag = tag;
218 INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
219 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
220 head = &qdata->cmd_hash[cmd_hashfn(tag)];
221 list_add(&tcmd->hash_list, head);
222 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
223}
224
225/*
226 * scsi_tgt_alloc_queue - setup queue used for message passing
227 * shost: scsi host
228 *
229 * This should be called by the LLD after host allocation.
230 * And will be released when the host is released.
231 */
232int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
233{
234 struct scsi_tgt_queuedata *queuedata;
235 struct request_queue *q;
236 int err, i;
237
238 /*
239 * Do we need to send a netlink event or should uspace
240 * just respond to the hotplug event?
241 */
242 q = __scsi_alloc_queue(shost, NULL);
243 if (!q)
244 return -ENOMEM;
245
246 queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
247 if (!queuedata) {
248 err = -ENOMEM;
249 goto cleanup_queue;
250 }
251 queuedata->shost = shost;
252 q->queuedata = queuedata;
253
254 /*
255 * this is a silly hack. We should probably just queue as many
 256	 * commands as are received to userspace. uspace can then make
257 * sure we do not overload the HBA
258 */
259 q->nr_requests = shost->hostt->can_queue;
260 /*
261 * We currently only support software LLDs so this does
262 * not matter for now. Do we need this for the cards we support?
263 * If so we should make it a host template value.
264 */
265 blk_queue_dma_alignment(q, 0);
266 shost->uspace_req_q = q;
267
268 for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
269 INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
270 spin_lock_init(&queuedata->cmd_hash_lock);
271
272 return 0;
273
274cleanup_queue:
275 blk_cleanup_queue(q);
276 return err;
277}
278EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);
279
280void scsi_tgt_free_queue(struct Scsi_Host *shost)
281{
282 int i;
283 unsigned long flags;
284 struct request_queue *q = shost->uspace_req_q;
285 struct scsi_cmnd *cmd;
286 struct scsi_tgt_queuedata *qdata = q->queuedata;
287 struct scsi_tgt_cmd *tcmd, *n;
288 LIST_HEAD(cmds);
289
290 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
291
292 for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
293 list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
294 hash_list) {
295 list_del(&tcmd->hash_list);
296 list_add(&tcmd->hash_list, &cmds);
297 }
298 }
299
300 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
301
302 while (!list_empty(&cmds)) {
303 tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
304 list_del(&tcmd->hash_list);
305 cmd = tcmd->rq->special;
306
307 shost->hostt->eh_abort_handler(cmd);
308 scsi_tgt_cmd_destroy(&tcmd->work);
309 }
310}
311EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);
312
313struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
314{
315 struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
316 return queue->shost;
317}
318EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
319
320/*
321 * scsi_tgt_queue_command - queue command for userspace processing
322 * @cmd: scsi command
323 * @scsilun: scsi lun
324 * @tag: unique value to identify this command for tmf
325 */
326int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
327 u64 tag)
328{
329 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
330 int err;
331
332 init_scsi_tgt_cmd(cmd->request, tcmd, tag);
333 err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
334 if (err)
335 cmd_hashlist_del(cmd);
336
337 return err;
338}
339EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
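Taken together, scsi_host_get_command() and scsi_tgt_queue_command() give a target-mode LLD its submission path. A minimal sketch of how such a driver might hand a newly received command to this library (the my_* names and the wire_tag/data_len parameters are invented for illustration):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>

/* Sketch only: a hypothetical target-mode LLD queuing a command it just
 * received from the wire. */
static int my_lld_recv_cmd(struct Scsi_Host *shost, u8 *cdb,
			   struct scsi_lun *lun, u64 wire_tag,
			   unsigned int data_len,
			   enum dma_data_direction dir)
{
	struct scsi_cmnd *cmd;

	cmd = scsi_host_get_command(shost, dir, GFP_ATOMIC);
	if (!cmd)
		return -ENOMEM;

	memcpy(cmd->cmnd, cdb, MAX_COMMAND_SIZE);
	cmd->request_bufflen = data_len;	/* forwarded to userspace */

	/* queue to userspace; the response comes back later through
	 * scsi_tgt_kspace_exec() and ends in transfer_response() */
	return scsi_tgt_queue_command(cmd, lun, wire_tag);
}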
340
341/*
 342 * This is normally run from an interrupt handler and the unmap
343 * needs process context so we must queue
344 */
345static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
346{
347 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
348
349 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
350
351 scsi_tgt_uspace_send_status(cmd, tcmd->tag);
352 queue_work(scsi_tgtd, &tcmd->work);
353}
354
355static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
356{
357 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
358 int err;
359
360 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
361
362 err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
363 switch (err) {
364 case SCSI_MLQUEUE_HOST_BUSY:
365 case SCSI_MLQUEUE_DEVICE_BUSY:
366 return -EAGAIN;
367 }
368
369 return 0;
370}
371
372static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
373{
374 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
375 int err;
376
377 err = __scsi_tgt_transfer_response(cmd);
378 if (!err)
379 return;
380
381 cmd->result = DID_BUS_BUSY << 16;
382 err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
383 if (err <= 0)
384 /* the eh will have to pick this up */
385 printk(KERN_ERR "Could not send cmd %p status\n", cmd);
386}
387
388static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
389{
390 struct request *rq = cmd->request;
391 struct scsi_tgt_cmd *tcmd = rq->end_io_data;
392 int count;
393
394 cmd->use_sg = rq->nr_phys_segments;
395 cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
396 if (!cmd->request_buffer)
397 return -ENOMEM;
398
399 cmd->request_bufflen = rq->data_len;
400
401 dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
402 rq_data_dir(rq));
403 count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
404 if (likely(count <= cmd->use_sg)) {
405 cmd->use_sg = count;
406 return 0;
407 }
408
409 eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
410 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
411 return -EINVAL;
412}
413
414/* TODO: test this crap and replace bio_map_user with new interface maybe */
415static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
416 int rw)
417{
418 struct request_queue *q = cmd->request->q;
419 struct request *rq = cmd->request;
420 void *uaddr = tcmd->buffer;
421 unsigned int len = tcmd->bufflen;
422 struct bio *bio;
423 int err;
424
425 while (len > 0) {
426 dprintk("%lx %u\n", (unsigned long) uaddr, len);
427 bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
428 if (IS_ERR(bio)) {
429 err = PTR_ERR(bio);
430 dprintk("fail to map %lx %u %d %x\n",
431 (unsigned long) uaddr, len, err, cmd->cmnd[0]);
432 goto unmap_bios;
433 }
434
435 uaddr += bio->bi_size;
436 len -= bio->bi_size;
437
438 /*
439 * The first bio is added and merged. We could probably
440 * try to add others using scsi_merge_bio() but for now
441 * we keep it simple. The first bio should be pretty large
442 * (either hitting the 1 MB bio pages limit or a queue limit)
443 * already but for really large IO we may want to try and
444 * merge these.
445 */
446 if (!rq->bio) {
447 blk_rq_bio_prep(q, rq, bio);
448 rq->data_len = bio->bi_size;
449 } else
450 /* put list of bios to transfer in next go around */
451 bio_list_add(&tcmd->xfer_list, bio);
452 }
453
454 cmd->offset = 0;
455 err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
456 if (err)
457 goto unmap_bios;
458
459 return 0;
460
461unmap_bios:
462 if (rq->bio) {
463 bio_unmap_user(rq->bio);
464 while ((bio = bio_list_pop(&tcmd->xfer_list)))
465 bio_unmap_user(bio);
466 }
467
468 return err;
469}
470
471static int scsi_tgt_transfer_data(struct scsi_cmnd *);
472
473static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
474{
475 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
476 struct bio *bio;
477 int err;
478
479 /* should we free resources here on error ? */
480 if (cmd->result) {
481send_uspace_err:
482 err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
483 if (err <= 0)
484 /* the tgt uspace eh will have to pick this up */
485 printk(KERN_ERR "Could not send cmd %p status\n", cmd);
486 return;
487 }
488
489 dprintk("cmd %p request_bufflen %u bufflen %u\n",
490 cmd, cmd->request_bufflen, tcmd->bufflen);
491
492 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
493 bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
494
495 tcmd->buffer += cmd->request_bufflen;
496 cmd->offset += cmd->request_bufflen;
497
498 if (!tcmd->xfer_list.head) {
499 scsi_tgt_transfer_response(cmd);
500 return;
501 }
502
503 dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
504 cmd, cmd->request_bufflen, tcmd->bufflen);
505
506 bio = bio_list_pop(&tcmd->xfer_list);
507 BUG_ON(!bio);
508
509 blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
510 cmd->request->data_len = bio->bi_size;
511 err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
512 if (err) {
513 cmd->result = DID_ERROR << 16;
514 goto send_uspace_err;
515 }
516
517 if (scsi_tgt_transfer_data(cmd)) {
518 cmd->result = DID_NO_CONNECT << 16;
519 goto send_uspace_err;
520 }
521}
522
523static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
524{
525 int err;
526 struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
527
528 err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
529 switch (err) {
530 case SCSI_MLQUEUE_HOST_BUSY:
531 case SCSI_MLQUEUE_DEVICE_BUSY:
532 return -EAGAIN;
533 default:
534 return 0;
535 }
536}
537
538static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
539 unsigned len)
540{
541 char __user *p = (char __user *) uaddr;
542
543 if (copy_from_user(cmd->sense_buffer, p,
544 min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
545 printk(KERN_ERR "Could not copy the sense buffer\n");
546 return -EIO;
547 }
548 return 0;
549}
550
551static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
552{
553 struct scsi_tgt_cmd *tcmd;
554 int err;
555
556 err = shost->hostt->eh_abort_handler(cmd);
557 if (err)
558 eprintk("fail to abort %p\n", cmd);
559
560 tcmd = cmd->request->end_io_data;
561 scsi_tgt_cmd_destroy(&tcmd->work);
562 return err;
563}
564
565static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
566{
567 struct scsi_tgt_queuedata *qdata = q->queuedata;
568 struct request *rq = NULL;
569 struct list_head *head;
570 struct scsi_tgt_cmd *tcmd;
571 unsigned long flags;
572
573 head = &qdata->cmd_hash[cmd_hashfn(tag)];
574 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
575 list_for_each_entry(tcmd, head, hash_list) {
576 if (tcmd->tag == tag) {
577 rq = tcmd->rq;
578 list_del(&tcmd->hash_list);
579 break;
580 }
581 }
582 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
583
584 return rq;
585}
586
587int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
588 unsigned long uaddr, u8 rw)
589{
590 struct Scsi_Host *shost;
591 struct scsi_cmnd *cmd;
592 struct request *rq;
593 struct scsi_tgt_cmd *tcmd;
594 int err = 0;
595
596 dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
597 result, len, uaddr, rw);
598
599 /* TODO: replace with a O(1) alg */
600 shost = scsi_host_lookup(host_no);
601 if (IS_ERR(shost)) {
602 printk(KERN_ERR "Could not find host no %d\n", host_no);
603 return -EINVAL;
604 }
605
606 if (!shost->uspace_req_q) {
607 printk(KERN_ERR "Not target scsi host %d\n", host_no);
608 goto done;
609 }
610
611 rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
612 if (!rq) {
613 printk(KERN_ERR "Could not find tag %llu\n",
614 (unsigned long long) tag);
615 err = -EINVAL;
616 goto done;
617 }
618 cmd = rq->special;
619
620 dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
621 result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
622
623 if (result == TASK_ABORTED) {
624 scsi_tgt_abort_cmd(shost, cmd);
625 goto done;
626 }
627 /*
628 * store the userspace values here, the working values are
629 * in the request_* values
630 */
631 tcmd = cmd->request->end_io_data;
632 tcmd->buffer = (void *)uaddr;
633 tcmd->bufflen = len;
634 cmd->result = result;
635
636 if (!tcmd->bufflen || cmd->request_buffer) {
637 err = __scsi_tgt_transfer_response(cmd);
638 goto done;
639 }
640
641 /*
 642	 * TODO: Do we need to handle the case where the request does
 643	 * not align with the LLD?
644 */
645 err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
646 if (err) {
647 eprintk("%p %d\n", cmd, err);
648 err = -EAGAIN;
649 goto done;
650 }
651
652 /* userspace failure */
653 if (cmd->result) {
654 if (status_byte(cmd->result) == CHECK_CONDITION)
655 scsi_tgt_copy_sense(cmd, uaddr, len);
656 err = __scsi_tgt_transfer_response(cmd);
657 goto done;
658 }
659 /* ask the target LLD to transfer the data to the buffer */
660 err = scsi_tgt_transfer_data(cmd);
661
662done:
663 scsi_host_put(shost);
664 return err;
665}
666
667int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, int function, u64 tag,
668 struct scsi_lun *scsilun, void *data)
669{
670 int err;
671
672 /* TODO: need to retry if this fails. */
673 err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, function,
674 tag, scsilun, data);
675 if (err < 0)
676 eprintk("The task management request lost!\n");
677 return err;
678}
679EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);
680
681int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result)
682{
683 struct Scsi_Host *shost;
684 int err = -EINVAL;
685
686 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
687
688 shost = scsi_host_lookup(host_no);
689 if (IS_ERR(shost)) {
690 printk(KERN_ERR "Could not find host no %d\n", host_no);
691 return err;
692 }
693
694 if (!shost->uspace_req_q) {
695 printk(KERN_ERR "Not target scsi host %d\n", host_no);
696 goto done;
697 }
698
699 err = shost->hostt->tsk_mgmt_response(mid, result);
700done:
701 scsi_host_put(shost);
702 return err;
703}
704
705static int __init scsi_tgt_init(void)
706{
707 int err;
708
709 scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
710 sizeof(struct scsi_tgt_cmd),
711 0, 0, NULL, NULL);
712 if (!scsi_tgt_cmd_cache)
713 return -ENOMEM;
714
715 scsi_tgtd = create_workqueue("scsi_tgtd");
716 if (!scsi_tgtd) {
717 err = -ENOMEM;
718 goto free_kmemcache;
719 }
720
721 err = scsi_tgt_if_init();
722 if (err)
723 goto destroy_wq;
724
725 return 0;
726
727destroy_wq:
728 destroy_workqueue(scsi_tgtd);
729free_kmemcache:
730 kmem_cache_destroy(scsi_tgt_cmd_cache);
731 return err;
732}
733
734static void __exit scsi_tgt_exit(void)
735{
736 destroy_workqueue(scsi_tgtd);
737 scsi_tgt_if_exit();
738 kmem_cache_destroy(scsi_tgt_cmd_cache);
739}
740
741module_init(scsi_tgt_init);
742module_exit(scsi_tgt_exit);
743
744MODULE_DESCRIPTION("SCSI target core");
745MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
new file mode 100644
index 000000000000..84488c51ff62
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -0,0 +1,25 @@
1struct scsi_cmnd;
2struct scsi_lun;
3struct Scsi_Host;
4struct task_struct;
5
6/* tmp - will replace with SCSI logging stuff */
7#define eprintk(fmt, args...) \
8do { \
9 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
10} while (0)
11
12#define dprintk(fmt, args...)
13/* #define dprintk eprintk */
14
15extern void scsi_tgt_if_exit(void);
16extern int scsi_tgt_if_init(void);
17
18extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
19 u64 tag);
20extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag);
21extern int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
22 unsigned long uaddr, u8 rw);
23extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
24 struct scsi_lun *scsilun, void *data);
25extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 38c215a78f69..3571ce8934e7 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -241,9 +241,9 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
241#define FC_MGMTSRVR_PORTID 0x00000a 241#define FC_MGMTSRVR_PORTID 0x00000a
242 242
243 243
244static void fc_timeout_deleted_rport(void *data); 244static void fc_timeout_deleted_rport(struct work_struct *work);
245static void fc_timeout_fail_rport_io(void *data); 245static void fc_timeout_fail_rport_io(struct work_struct *work);
246static void fc_scsi_scan_rport(void *data); 246static void fc_scsi_scan_rport(struct work_struct *work);
247 247
248/* 248/*
 249 * Attribute counts per object type... 249
@@ -1613,7 +1613,7 @@ fc_flush_work(struct Scsi_Host *shost)
1613 * 1 on success / 0 already queued / < 0 for error 1613 * 1 on success / 0 already queued / < 0 for error
1614 **/ 1614 **/
1615static int 1615static int
1616fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, 1616fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
1617 unsigned long delay) 1617 unsigned long delay)
1618{ 1618{
1619 if (unlikely(!fc_host_devloss_work_q(shost))) { 1619 if (unlikely(!fc_host_devloss_work_q(shost))) {
@@ -1625,9 +1625,6 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
1625 return -EINVAL; 1625 return -EINVAL;
1626 } 1626 }
1627 1627
1628 if (delay == 0)
1629 return queue_work(fc_host_devloss_work_q(shost), work);
1630
1631 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); 1628 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
1632} 1629}
1633 1630
@@ -1712,12 +1709,13 @@ EXPORT_SYMBOL(fc_remove_host);
 1712 * fc_starget_delete - called to delete the scsi descendants of an rport 1709
1713 * (target and all sdevs) 1710 * (target and all sdevs)
1714 * 1711 *
1715 * @data: remote port to be operated on. 1712 * @work: remote port to be operated on.
1716 **/ 1713 **/
1717static void 1714static void
1718fc_starget_delete(void *data) 1715fc_starget_delete(struct work_struct *work)
1719{ 1716{
1720 struct fc_rport *rport = (struct fc_rport *)data; 1717 struct fc_rport *rport =
1718 container_of(work, struct fc_rport, stgt_delete_work);
1721 struct Scsi_Host *shost = rport_to_shost(rport); 1719 struct Scsi_Host *shost = rport_to_shost(rport);
1722 unsigned long flags; 1720 unsigned long flags;
1723 struct fc_internal *i = to_fc_internal(shost->transportt); 1721 struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1751,12 +1749,13 @@ fc_starget_delete(void *data)
1751/** 1749/**
1752 * fc_rport_final_delete - finish rport termination and delete it. 1750 * fc_rport_final_delete - finish rport termination and delete it.
1753 * 1751 *
1754 * @data: remote port to be deleted. 1752 * @work: remote port to be deleted.
1755 **/ 1753 **/
1756static void 1754static void
1757fc_rport_final_delete(void *data) 1755fc_rport_final_delete(struct work_struct *work)
1758{ 1756{
1759 struct fc_rport *rport = (struct fc_rport *)data; 1757 struct fc_rport *rport =
1758 container_of(work, struct fc_rport, rport_delete_work);
1760 struct device *dev = &rport->dev; 1759 struct device *dev = &rport->dev;
1761 struct Scsi_Host *shost = rport_to_shost(rport); 1760 struct Scsi_Host *shost = rport_to_shost(rport);
1762 struct fc_internal *i = to_fc_internal(shost->transportt); 1761 struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1770,7 +1769,7 @@ fc_rport_final_delete(void *data)
1770 1769
1771 /* Delete SCSI target and sdevs */ 1770 /* Delete SCSI target and sdevs */
1772 if (rport->scsi_target_id != -1) 1771 if (rport->scsi_target_id != -1)
1773 fc_starget_delete(data); 1772 fc_starget_delete(&rport->stgt_delete_work);
1774 else if (i->f->dev_loss_tmo_callbk) 1773 else if (i->f->dev_loss_tmo_callbk)
1775 i->f->dev_loss_tmo_callbk(rport); 1774 i->f->dev_loss_tmo_callbk(rport);
1776 else if (i->f->terminate_rport_io) 1775 else if (i->f->terminate_rport_io)
@@ -1829,11 +1828,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1829 rport->channel = channel; 1828 rport->channel = channel;
1830 rport->fast_io_fail_tmo = -1; 1829 rport->fast_io_fail_tmo = -1;
1831 1830
1832 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); 1831 INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
1833 INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport); 1832 INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
1834 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); 1833 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
1835 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); 1834 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
1836 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); 1835 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
1837 1836
1838 spin_lock_irqsave(shost->host_lock, flags); 1837 spin_lock_irqsave(shost->host_lock, flags);
1839 1838
@@ -1963,7 +1962,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1963 } 1962 }
1964 1963
1965 if (match) { 1964 if (match) {
1966 struct work_struct *work = 1965 struct delayed_work *work =
1967 &rport->dev_loss_work; 1966 &rport->dev_loss_work;
1968 1967
1969 memcpy(&rport->node_name, &ids->node_name, 1968 memcpy(&rport->node_name, &ids->node_name,
@@ -2267,12 +2266,13 @@ EXPORT_SYMBOL(fc_remote_port_rolechg);
2267 * was a SCSI target (thus was blocked), and failed 2266 * was a SCSI target (thus was blocked), and failed
 2268 * to return in the allotted time. 2267
2269 * 2268 *
 2270 * @data: rport target that failed to reappear in the allotted time. 2269 * @work: rport target that failed to reappear in the allotted time.
2271 **/ 2270 **/
2272static void 2271static void
2273fc_timeout_deleted_rport(void *data) 2272fc_timeout_deleted_rport(struct work_struct *work)
2274{ 2273{
2275 struct fc_rport *rport = (struct fc_rport *)data; 2274 struct fc_rport *rport =
2275 container_of(work, struct fc_rport, dev_loss_work.work);
2276 struct Scsi_Host *shost = rport_to_shost(rport); 2276 struct Scsi_Host *shost = rport_to_shost(rport);
2277 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2277 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2278 unsigned long flags; 2278 unsigned long flags;
@@ -2366,15 +2366,16 @@ fc_timeout_deleted_rport(void *data)
2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a 2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
2367 * disconnected SCSI target. 2367 * disconnected SCSI target.
2368 * 2368 *
2369 * @data: rport to terminate io on. 2369 * @work: rport to terminate io on.
2370 * 2370 *
2371 * Notes: Only requests the failure of the io, not that all are flushed 2371 * Notes: Only requests the failure of the io, not that all are flushed
2372 * prior to returning. 2372 * prior to returning.
2373 **/ 2373 **/
2374static void 2374static void
2375fc_timeout_fail_rport_io(void *data) 2375fc_timeout_fail_rport_io(struct work_struct *work)
2376{ 2376{
2377 struct fc_rport *rport = (struct fc_rport *)data; 2377 struct fc_rport *rport =
2378 container_of(work, struct fc_rport, fail_io_work.work);
2378 struct Scsi_Host *shost = rport_to_shost(rport); 2379 struct Scsi_Host *shost = rport_to_shost(rport);
2379 struct fc_internal *i = to_fc_internal(shost->transportt); 2380 struct fc_internal *i = to_fc_internal(shost->transportt);
2380 2381
@@ -2387,12 +2388,13 @@ fc_timeout_fail_rport_io(void *data)
2387/** 2388/**
2388 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 2389 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
2389 * 2390 *
2390 * @data: remote port to be scanned. 2391 * @work: remote port to be scanned.
2391 **/ 2392 **/
2392static void 2393static void
2393fc_scsi_scan_rport(void *data) 2394fc_scsi_scan_rport(struct work_struct *work)
2394{ 2395{
2395 struct fc_rport *rport = (struct fc_rport *)data; 2396 struct fc_rport *rport =
2397 container_of(work, struct fc_rport, scan_work);
2396 struct Scsi_Host *shost = rport_to_shost(rport); 2398 struct Scsi_Host *shost = rport_to_shost(rport);
2397 unsigned long flags; 2399 unsigned long flags;
2398 2400
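The fc_rport timers show the delayed-work side of the same conversion: timed callbacks now embed a struct delayed_work and reach their container through the inner .work member, and the caller-side delay == 0 shortcut could be dropped because queue_delayed_work() short-circuits a zero delay itself. A minimal sketch, assuming a hypothetical struct bar:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct bar {
	int retries;
	struct delayed_work dwork;
};

static void bar_timeout(struct work_struct *work)
{
	/* note the extra ".work": the handler is passed the work_struct
	 * embedded inside the delayed_work */
	struct bar *b = container_of(work, struct bar, dwork.work);

	printk(KERN_INFO "bar timed out, %d retries left\n", b->retries);
}

static void bar_init(struct bar *b)
{
	INIT_DELAYED_WORK(&b->dwork, bar_timeout);
}

static void bar_arm(struct bar *b, unsigned long delay)
{
	/* a zero delay is fine: queue_delayed_work() short-circuits it,
	 * which is why fc_queue_devloss_work() above lost its own check */
	schedule_delayed_work(&b->dwork, delay);
}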
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9b25124a989e..9c22f1342715 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -234,9 +234,11 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
234 return 0; 234 return 0;
235} 235}
236 236
237static void session_recovery_timedout(void *data) 237static void session_recovery_timedout(struct work_struct *work)
238{ 238{
239 struct iscsi_cls_session *session = data; 239 struct iscsi_cls_session *session =
240 container_of(work, struct iscsi_cls_session,
241 recovery_work.work);
240 242
241 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " 243 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
242 "out after %d secs\n", session->recovery_tmo); 244 "out after %d secs\n", session->recovery_tmo);
@@ -276,7 +278,7 @@ iscsi_alloc_session(struct Scsi_Host *shost,
276 278
277 session->transport = transport; 279 session->transport = transport;
278 session->recovery_tmo = 120; 280 session->recovery_tmo = 120;
279 INIT_WORK(&session->recovery_work, session_recovery_timedout, session); 281 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
280 INIT_LIST_HEAD(&session->host_list); 282 INIT_LIST_HEAD(&session->host_list);
281 INIT_LIST_HEAD(&session->sess_list); 283 INIT_LIST_HEAD(&session->sess_list);
282 284
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 9f070f0d0f2b..3fded4831460 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -964,9 +964,10 @@ struct work_queue_wrapper {
964}; 964};
965 965
966static void 966static void
967spi_dv_device_work_wrapper(void *data) 967spi_dv_device_work_wrapper(struct work_struct *work)
968{ 968{
969 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 969 struct work_queue_wrapper *wqw =
970 container_of(work, struct work_queue_wrapper, work);
970 struct scsi_device *sdev = wqw->sdev; 971 struct scsi_device *sdev = wqw->sdev;
971 972
972 kfree(wqw); 973 kfree(wqw);
@@ -1006,7 +1007,7 @@ spi_schedule_dv_device(struct scsi_device *sdev)
1006 return; 1007 return;
1007 } 1008 }
1008 1009
1009 INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); 1010 INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
1010 wqw->sdev = sdev; 1011 wqw->sdev = sdev;
1011 1012
1012 schedule_work(&wqw->work); 1013 schedule_work(&wqw->work);
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
new file mode 100644
index 000000000000..8a636103083d
--- /dev/null
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -0,0 +1,31 @@
1/*
2 * scsi_wait_scan.c
3 *
4 * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
5 *
6 * This is a simple module to wait until all the async scans are
7 * complete. The idea is to use it in initrd/initramfs scripts. You
8 * modprobe it after all the modprobes of the root SCSI drivers and it
9 * will wait until they have all finished scanning their busses before
10 * allowing the boot to proceed
11 */
12
13#include <linux/module.h>
14#include "scsi_priv.h"
15
16static int __init wait_scan_init(void)
17{
18 scsi_complete_async_scans();
19 return 0;
20}
21
22static void __exit wait_scan_exit(void)
23{
24}
25
26MODULE_DESCRIPTION("SCSI wait for scans");
27MODULE_AUTHOR("James Bottomley");
28MODULE_LICENSE("GPL");
29
30late_initcall(wait_scan_init);
31module_exit(wait_scan_exit);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84ff203ffedd..f6a452846fab 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1051,6 +1051,14 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1051 &sshdr, SD_TIMEOUT, 1051 &sshdr, SD_TIMEOUT,
1052 SD_MAX_RETRIES); 1052 SD_MAX_RETRIES);
1053 1053
1054 /*
1055 * If the drive has indicated to us that it
1056 * doesn't have any media in it, don't bother
1057 * with any more polling.
1058 */
1059 if (media_not_present(sdkp, &sshdr))
1060 return;
1061
1054 if (the_result) 1062 if (the_result)
1055 sense_valid = scsi_sense_valid(&sshdr); 1063 sense_valid = scsi_sense_valid(&sshdr);
1056 retries++; 1064 retries++;
@@ -1059,14 +1067,6 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1059 ((driver_byte(the_result) & DRIVER_SENSE) && 1067 ((driver_byte(the_result) & DRIVER_SENSE) &&
1060 sense_valid && sshdr.sense_key == UNIT_ATTENTION))); 1068 sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
1061 1069
1062 /*
1063 * If the drive has indicated to us that it doesn't have
1064 * any media in it, don't bother with any of the rest of
1065 * this crap.
1066 */
1067 if (media_not_present(sdkp, &sshdr))
1068 return;
1069
1070 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) { 1070 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
1071 /* no sense, TUR either succeeded or failed 1071 /* no sense, TUR either succeeded or failed
1072 * with a status error */ 1072 * with a status error */
@@ -1467,7 +1467,6 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1467 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); 1467 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
1468 1468
1469 if (scsi_status_is_good(res)) { 1469 if (scsi_status_is_good(res)) {
1470 int ct = 0;
1471 int offset = data.header_length + data.block_descriptor_length; 1470 int offset = data.header_length + data.block_descriptor_length;
1472 1471
1473 if (offset >= SD_BUF_SIZE - 2) { 1472 if (offset >= SD_BUF_SIZE - 2) {
@@ -1496,11 +1495,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1496 sdkp->DPOFUA = 0; 1495 sdkp->DPOFUA = 0;
1497 } 1496 }
1498 1497
1499 ct = sdkp->RCD + 2*sdkp->WCE; 1498 printk(KERN_NOTICE "SCSI device %s: "
1500 1499 "write cache: %s, read cache: %s, %s\n",
1501 printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n", 1500 diskname,
1502 diskname, sd_cache_types[ct], 1501 sdkp->WCE ? "enabled" : "disabled",
1503 sdkp->DPOFUA ? " w/ FUA" : ""); 1502 sdkp->RCD ? "disabled" : "enabled",
1503 sdkp->DPOFUA ? "supports DPO and FUA"
1504 : "doesn't support DPO or FUA");
1504 1505
1505 return; 1506 return;
1506 } 1507 }
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e1a52c525ed4..587274dd7059 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, 9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
10 Michael Schaefer, J"org Weule, and Eric Youngdale. 10 Michael Schaefer, J"org Weule, and Eric Youngdale.
11 11
12 Copyright 1992 - 2005 Kai Makisara 12 Copyright 1992 - 2006 Kai Makisara
13 email Kai.Makisara@kolumbus.fi 13 email Kai.Makisara@kolumbus.fi
14 14
15 Some small formal changes - aeb, 950809 15 Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20050830"; 20static const char *verstr = "20061107";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -999,7 +999,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
999 STp->min_block = ((STp->buffer)->b_data[4] << 8) | 999 STp->min_block = ((STp->buffer)->b_data[4] << 8) |
1000 (STp->buffer)->b_data[5]; 1000 (STp->buffer)->b_data[5];
1001 if ( DEB( debugging || ) !STp->inited) 1001 if ( DEB( debugging || ) !STp->inited)
1002 printk(KERN_WARNING 1002 printk(KERN_INFO
1003 "%s: Block limits %d - %d bytes.\n", name, 1003 "%s: Block limits %d - %d bytes.\n", name,
1004 STp->min_block, STp->max_block); 1004 STp->min_block, STp->max_block);
1005 } else { 1005 } else {
@@ -1224,7 +1224,7 @@ static int st_flush(struct file *filp, fl_owner_t id)
1224 } 1224 }
1225 1225
1226 DEBC( if (STp->nbr_requests) 1226 DEBC( if (STp->nbr_requests)
1227 printk(KERN_WARNING "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n", 1227 printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
1228 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable)); 1228 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable));
1229 1229
1230 if (STps->rw == ST_WRITING && !STp->pos_unknown) { 1230 if (STps->rw == ST_WRITING && !STp->pos_unknown) {
@@ -4056,11 +4056,11 @@ static int st_probe(struct device *dev)
4056 goto out_free_tape; 4056 goto out_free_tape;
4057 } 4057 }
4058 4058
4059 sdev_printk(KERN_WARNING, SDp, 4059 sdev_printk(KERN_NOTICE, SDp,
4060 "Attached scsi tape %s\n", tape_name(tpnt)); 4060 "Attached scsi tape %s\n", tape_name(tpnt));
4061 printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n", 4061 sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
4062 tape_name(tpnt), tpnt->try_dio ? "yes" : "no", 4062 tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
4063 queue_dma_alignment(SDp->request_queue) + 1); 4063 queue_dma_alignment(SDp->request_queue) + 1);
4064 4064
4065 return 0; 4065 return 0;
4066 4066
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 185c270bb043..ba6bcdaf2a6a 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -11,8 +11,6 @@
11 * Written By: 11 * Written By:
12 * Ed Lin <promise_linux@promise.com> 12 * Ed Lin <promise_linux@promise.com>
13 * 13 *
14 * Version: 3.0.0.1
15 *
16 */ 14 */
17 15
18#include <linux/init.h> 16#include <linux/init.h>
@@ -37,9 +35,9 @@
37#include <scsi/scsi_tcq.h> 35#include <scsi/scsi_tcq.h>
38 36
39#define DRV_NAME "stex" 37#define DRV_NAME "stex"
40#define ST_DRIVER_VERSION "3.0.0.1" 38#define ST_DRIVER_VERSION "3.1.0.1"
41#define ST_VER_MAJOR 3 39#define ST_VER_MAJOR 3
42#define ST_VER_MINOR 0 40#define ST_VER_MINOR 1
43#define ST_OEM 0 41#define ST_OEM 0
44#define ST_BUILD_VER 1 42#define ST_BUILD_VER 1
45 43
@@ -76,8 +74,10 @@ enum {
76 MU_STATE_STARTED = 4, 74 MU_STATE_STARTED = 4,
77 MU_STATE_RESETTING = 5, 75 MU_STATE_RESETTING = 5,
78 76
79 MU_MAX_DELAY_TIME = 240000, 77 MU_MAX_DELAY = 120,
80 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, 78 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
79 MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
80 MU_HARD_RESET_WAIT = 30000,
81 HMU_PARTNER_TYPE = 2, 81 HMU_PARTNER_TYPE = 2,
82 82
83 /* firmware returned values */ 83 /* firmware returned values */
@@ -120,7 +120,8 @@ enum {
120 120
121 st_shasta = 0, 121 st_shasta = 0,
122 st_vsc = 1, 122 st_vsc = 1,
123 st_yosemite = 2, 123 st_vsc1 = 2,
124 st_yosemite = 3,
124 125
125 PASSTHRU_REQ_TYPE = 0x00000001, 126 PASSTHRU_REQ_TYPE = 0x00000001,
126 PASSTHRU_REQ_NO_WAKEUP = 0x00000100, 127 PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
@@ -150,6 +151,8 @@ enum {
150 MGT_CMD_SIGNATURE = 0xba, 151 MGT_CMD_SIGNATURE = 0xba,
151 152
152 INQUIRY_EVPD = 0x01, 153 INQUIRY_EVPD = 0x01,
154
155 ST_ADDITIONAL_MEM = 0x200000,
153}; 156};
154 157
155/* SCSI inquiry data */ 158/* SCSI inquiry data */
@@ -211,7 +214,9 @@ struct handshake_frame {
211 __le32 partner_ver_minor; 214 __le32 partner_ver_minor;
212 __le32 partner_ver_oem; 215 __le32 partner_ver_oem;
213 __le32 partner_ver_build; 216 __le32 partner_ver_build;
214 u32 reserved1[4]; 217 __le32 extra_offset; /* NEW */
218 __le32 extra_size; /* NEW */
219 u32 reserved1[2];
215}; 220};
216 221
217struct req_msg { 222struct req_msg {
@@ -302,6 +307,7 @@ struct st_hba {
302 void __iomem *mmio_base; /* iomapped PCI memory space */ 307 void __iomem *mmio_base; /* iomapped PCI memory space */
303 void *dma_mem; 308 void *dma_mem;
304 dma_addr_t dma_handle; 309 dma_addr_t dma_handle;
310 size_t dma_size;
305 311
306 struct Scsi_Host *host; 312 struct Scsi_Host *host;
307 struct pci_dev *pdev; 313 struct pci_dev *pdev;
@@ -507,6 +513,7 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
507 size_t count = sizeof(struct st_frame); 513 size_t count = sizeof(struct st_frame);
508 514
509 p = hba->copy_buffer; 515 p = hba->copy_buffer;
516 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD);
510 memset(p->base, 0, sizeof(u32)*6); 517 memset(p->base, 0, sizeof(u32)*6);
511 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); 518 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
512 p->rom_addr = 0; 519 p->rom_addr = 0;
@@ -901,27 +908,34 @@ static int stex_handshake(struct st_hba *hba)
901 void __iomem *base = hba->mmio_base; 908 void __iomem *base = hba->mmio_base;
902 struct handshake_frame *h; 909 struct handshake_frame *h;
903 dma_addr_t status_phys; 910 dma_addr_t status_phys;
904 int i; 911 u32 data;
912 unsigned long before;
905 913
906 if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { 914 if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
907 writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); 915 writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
908 readl(base + IDBL); 916 readl(base + IDBL);
909 for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE 917 before = jiffies;
910 && i < MU_MAX_DELAY_TIME; i++) { 918 while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
919 if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
920 printk(KERN_ERR DRV_NAME
921 "(%s): no handshake signature\n",
922 pci_name(hba->pdev));
923 return -1;
924 }
911 rmb(); 925 rmb();
912 msleep(1); 926 msleep(1);
913 } 927 }
914
915 if (i == MU_MAX_DELAY_TIME) {
916 printk(KERN_ERR DRV_NAME
917 "(%s): no handshake signature\n",
918 pci_name(hba->pdev));
919 return -1;
920 }
921 } 928 }
922 929
923 udelay(10); 930 udelay(10);
924 931
932 data = readl(base + OMR1);
933 if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
934 data &= 0x0000ffff;
935 if (hba->host->can_queue > data)
936 hba->host->can_queue = data;
937 }
938
925 h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE); 939 h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
926 h->rb_phy = cpu_to_le32(hba->dma_handle); 940 h->rb_phy = cpu_to_le32(hba->dma_handle);
927 h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16); 941 h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
@@ -931,6 +945,11 @@ static int stex_handshake(struct st_hba *hba)
931 h->status_cnt = cpu_to_le16(MU_STATUS_COUNT); 945 h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
932 stex_gettime(&h->hosttime); 946 stex_gettime(&h->hosttime);
933 h->partner_type = HMU_PARTNER_TYPE; 947 h->partner_type = HMU_PARTNER_TYPE;
948 if (hba->dma_size > STEX_BUFFER_SIZE) {
949 h->extra_offset = cpu_to_le32(STEX_BUFFER_SIZE);
950 h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);
951 } else
952 h->extra_offset = h->extra_size = 0;
934 953
935 status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE; 954 status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
936 writel(status_phys, base + IMR0); 955 writel(status_phys, base + IMR0);
@@ -944,19 +963,18 @@ static int stex_handshake(struct st_hba *hba)
944 readl(base + IDBL); /* flush */ 963 readl(base + IDBL); /* flush */
945 964
946 udelay(10); 965 udelay(10);
947 for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE 966 before = jiffies;
948 && i < MU_MAX_DELAY_TIME; i++) { 967 while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
968 if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
969 printk(KERN_ERR DRV_NAME
970 "(%s): no signature after handshake frame\n",
971 pci_name(hba->pdev));
972 return -1;
973 }
949 rmb(); 974 rmb();
950 msleep(1); 975 msleep(1);
951 } 976 }
952 977
953 if (i == MU_MAX_DELAY_TIME) {
954 printk(KERN_ERR DRV_NAME
955 "(%s): no signature after handshake frame\n",
956 pci_name(hba->pdev));
957 return -1;
958 }
959
960 writel(0, base + IMR0); 978 writel(0, base + IMR0);
961 readl(base + IMR0); 979 readl(base + IMR0);
962 writel(0, base + OMR0); 980 writel(0, base + OMR0);
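Both handshake waits now bound the poll by wall-clock time instead of counting msleep(1) iterations; the idiom is the usual jiffies/time_after() timeout loop. A minimal sketch, with a cond() callback standing in for the readl(base + OMR0) signature check:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Sketch of the polling-with-timeout idiom used in stex_handshake() above;
 * cond()/arg are illustrative stand-ins for the hardware register check. */
static int my_wait_for(bool (*cond)(void *arg), void *arg, unsigned int secs)
{
	unsigned long before = jiffies;

	while (!cond(arg)) {
		/* time_after() copes with jiffies wrap-around, unlike a
		 * raw "jiffies > deadline" comparison */
		if (time_after(jiffies, before + secs * HZ))
			return -ETIMEDOUT;
		msleep(1);
	}
	return 0;
}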
@@ -1038,9 +1056,9 @@ static void stex_hard_reset(struct st_hba *hba)
1038 pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET; 1056 pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
1039 pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); 1057 pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
1040 1058
1041 for (i = 0; i < MU_MAX_DELAY_TIME; i++) { 1059 for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
1042 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); 1060 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
1043 if (pci_cmd & PCI_COMMAND_MASTER) 1061 if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
1044 break; 1062 break;
1045 msleep(1); 1063 msleep(1);
1046 } 1064 }
@@ -1100,18 +1118,18 @@ static int stex_reset(struct scsi_cmnd *cmd)
1100static int stex_biosparam(struct scsi_device *sdev, 1118static int stex_biosparam(struct scsi_device *sdev,
1101 struct block_device *bdev, sector_t capacity, int geom[]) 1119 struct block_device *bdev, sector_t capacity, int geom[])
1102{ 1120{
1103 int heads = 255, sectors = 63, cylinders; 1121 int heads = 255, sectors = 63;
1104 1122
1105 if (capacity < 0x200000) { 1123 if (capacity < 0x200000) {
1106 heads = 64; 1124 heads = 64;
1107 sectors = 32; 1125 sectors = 32;
1108 } 1126 }
1109 1127
1110 cylinders = sector_div(capacity, heads * sectors); 1128 sector_div(capacity, heads * sectors);
1111 1129
1112 geom[0] = heads; 1130 geom[0] = heads;
1113 geom[1] = sectors; 1131 geom[1] = sectors;
1114 geom[2] = cylinders; 1132 geom[2] = capacity;
1115 1133
1116 return 0; 1134 return 0;
1117} 1135}
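The biosparam change leans on sector_div() semantics: the macro divides its sector_t argument in place and returns the remainder, so after the call capacity already holds the cylinder count and the separate local is unnecessary. A small worked example, assuming a hypothetical 8 GiB disk:

#include <linux/kernel.h>
#include <linux/types.h>

/* Worked example of the sector_div() semantics the hunk above relies on:
 * the macro divides its sector_t argument in place and returns the
 * remainder.  Figures assume a hypothetical 8 GiB disk. */
static void my_biosparam_example(void)
{
	sector_t capacity = 16777216;	/* 8 GiB in 512-byte sectors */
	u32 remainder;

	remainder = sector_div(capacity, 255 * 63);
	/* capacity is now 1044 (the cylinder count), remainder is 5356 */
	printk(KERN_DEBUG "chs = %u/255/63, rem %u\n",
	       (u32)capacity, remainder);
}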
@@ -1193,8 +1211,13 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1193 goto out_iounmap; 1211 goto out_iounmap;
1194 } 1212 }
1195 1213
1214 hba->cardtype = (unsigned int) id->driver_data;
1215 if (hba->cardtype == st_vsc && (pdev->subsystem_device & 0xf) == 0x1)
1216 hba->cardtype = st_vsc1;
1217 hba->dma_size = (hba->cardtype == st_vsc1) ?
1218 (STEX_BUFFER_SIZE + ST_ADDITIONAL_MEM) : (STEX_BUFFER_SIZE);
1196 hba->dma_mem = dma_alloc_coherent(&pdev->dev, 1219 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1197 STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL); 1220 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1198 if (!hba->dma_mem) { 1221 if (!hba->dma_mem) {
1199 err = -ENOMEM; 1222 err = -ENOMEM;
1200 printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", 1223 printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
@@ -1207,8 +1230,6 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1207 hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE; 1230 hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
1208 hba->mu_status = MU_STATE_STARTING; 1231 hba->mu_status = MU_STATE_STARTING;
1209 1232
1210 hba->cardtype = (unsigned int) id->driver_data;
1211
1212 /* firmware uses id/lun pair for a logical drive, but lun would be 1233 /* firmware uses id/lun pair for a logical drive, but lun would be
1213 always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use 1234 always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use
1214 channel to map lun here */ 1235 channel to map lun here */
@@ -1233,7 +1254,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1233 if (err) 1254 if (err)
1234 goto out_free_irq; 1255 goto out_free_irq;
1235 1256
1236 err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE); 1257 err = scsi_init_shared_tag_map(host, host->can_queue);
1237 if (err) { 1258 if (err) {
1238 printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n", 1259 printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
1239 pci_name(pdev)); 1260 pci_name(pdev));
@@ -1256,7 +1277,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1256out_free_irq: 1277out_free_irq:
1257 free_irq(pdev->irq, hba); 1278 free_irq(pdev->irq, hba);
1258out_pci_free: 1279out_pci_free:
1259 dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE, 1280 dma_free_coherent(&pdev->dev, hba->dma_size,
1260 hba->dma_mem, hba->dma_handle); 1281 hba->dma_mem, hba->dma_handle);
1261out_iounmap: 1282out_iounmap:
1262 iounmap(hba->mmio_base); 1283 iounmap(hba->mmio_base);
@@ -1317,7 +1338,7 @@ static void stex_hba_free(struct st_hba *hba)
1317 1338
1318 pci_release_regions(hba->pdev); 1339 pci_release_regions(hba->pdev);
1319 1340
1320 dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE, 1341 dma_free_coherent(&hba->pdev->dev, hba->dma_size,
1321 hba->dma_mem, hba->dma_handle); 1342 hba->dma_mem, hba->dma_handle);
1322} 1343}
1323 1344
@@ -1346,15 +1367,32 @@ static void stex_shutdown(struct pci_dev *pdev)
1346} 1367}
1347 1368
1348static struct pci_device_id stex_pci_tbl[] = { 1369static struct pci_device_id stex_pci_tbl[] = {
1349 { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1370 /* st_shasta */
1350 { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1371 { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1351 { 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1372 st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
1352 { 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1373 { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1353 { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1374 st_shasta }, /* SuperTrak EX12350 */
1354 { 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1375 { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1355 { 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1376 st_shasta }, /* SuperTrak EX4350 */
1356 { 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, 1377 { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1357 { 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yosemite }, 1378 st_shasta }, /* SuperTrak EX24350 */
1379
1380 /* st_vsc */
1381 { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
1382
1383 /* st_yosemite */
1384 { 0x105a, 0x8650, PCI_ANY_ID, 0x4600, 0, 0,
1385 st_yosemite }, /* SuperTrak EX4650 */
1386 { 0x105a, 0x8650, PCI_ANY_ID, 0x4610, 0, 0,
1387 st_yosemite }, /* SuperTrak EX4650o */
1388 { 0x105a, 0x8650, PCI_ANY_ID, 0x8600, 0, 0,
1389 st_yosemite }, /* SuperTrak EX8650EL */
1390 { 0x105a, 0x8650, PCI_ANY_ID, 0x8601, 0, 0,
1391 st_yosemite }, /* SuperTrak EX8650 */
1392 { 0x105a, 0x8650, PCI_ANY_ID, 0x8602, 0, 0,
1393 st_yosemite }, /* SuperTrak EX8654 */
1394 { 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1395 st_yosemite }, /* generic st_yosemite */
1358 { } /* terminate list */ 1396 { } /* terminate list */
1359}; 1397};
1360MODULE_DEVICE_TABLE(pci, stex_pci_tbl); 1398MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
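
In the ID table above, the driver_data field of each pci_device_id entry carries the card family (st_shasta, st_vsc, st_yosemite); stex_probe() copies it into hba->cardtype and refines st_vsc into st_vsc1 from the low nibble of the PCI subsystem device ID, which in turn selects the DMA buffer size. A short sketch of that lookup, with invented enum values (the field and constant names follow the hunks above):

        enum { st_shasta = 1, st_vsc, st_vsc1, st_yosemite };  /* values illustrative */

        static struct pci_device_id example_tbl[] = {
                { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
                { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
                { }     /* terminate list */
        };

        static unsigned int example_card_type(struct pci_dev *pdev,
                                              const struct pci_device_id *id)
        {
                unsigned int cardtype = (unsigned int)id->driver_data;

                /* st_vsc1 boards differ only in the low nibble of the subsystem ID */
                if (cardtype == st_vsc && (pdev->subsystem_device & 0xf) == 0x1)
                        cardtype = st_vsc1;
                return cardtype;
        }
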
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index 646e840266e2..76a069b7ac0b 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -8,20 +8,20 @@
8 * drew@colorado.edu 8 * drew@colorado.edu
9 * +1 (303) 440-4894 9 * +1 (303) 440-4894
10 * 10 *
11 * DISTRIBUTION RELEASE 3. 11 * DISTRIBUTION RELEASE 3.
12 * 12 *
13 * For more information, please consult 13 * For more information, please consult
14 * 14 *
15 * Trantor Systems, Ltd. 15 * Trantor Systems, Ltd.
16 * T128/T128F/T228 SCSI Host Adapter 16 * T128/T128F/T228 SCSI Host Adapter
17 * Hardware Specifications 17 * Hardware Specifications
18 * 18 *
19 * Trantor Systems, Ltd. 19 * Trantor Systems, Ltd.
20 * 5415 Randall Place 20 * 5415 Randall Place
21 * Fremont, CA 94538 21 * Fremont, CA 94538
22 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 22 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
23 * 23 *
24 * and 24 * and
25 * 25 *
26 * NCR 5380 Family 26 * NCR 5380 Family
27 * SCSI Protocol Controller 27 * SCSI Protocol Controller
@@ -48,15 +48,15 @@
48#define TDEBUG_TRANSFER 0x2 48#define TDEBUG_TRANSFER 0x2
49 49
50/* 50/*
51 * The trantor boards are memory mapped. They use an NCR5380 or 51 * The trantor boards are memory mapped. They use an NCR5380 or
52 * equivalent (my sample board had part second sourced from ZILOG). 52 * equivalent (my sample board had part second sourced from ZILOG).
53 * NCR's recommended "Pseudo-DMA" architecture is used, where 53 * NCR's recommended "Pseudo-DMA" architecture is used, where
54 * a PAL drives the DMA signals on the 5380 allowing fast, blind 54 * a PAL drives the DMA signals on the 5380 allowing fast, blind
55 * transfers with proper handshaking. 55 * transfers with proper handshaking.
56 */ 56 */
57 57
58/* 58/*
59 * Note : a boot switch is provided for the purpose of informing the 59 * Note : a boot switch is provided for the purpose of informing the
60 * firmware to boot or not boot from attached SCSI devices. So, I imagine 60 * firmware to boot or not boot from attached SCSI devices. So, I imagine
61 * there are fewer people who've yanked the ROM like they do on the Seagate 61 * there are fewer people who've yanked the ROM like they do on the Seagate
62 * to make bootup faster, and I'll probably use this for autodetection. 62 * to make bootup faster, and I'll probably use this for autodetection.
@@ -92,19 +92,20 @@
92#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */ 92#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
93 93
94#ifndef ASM 94#ifndef ASM
95static int t128_abort(Scsi_Cmnd *); 95static int t128_abort(struct scsi_cmnd *);
96static int t128_biosparam(struct scsi_device *, struct block_device *, 96static int t128_biosparam(struct scsi_device *, struct block_device *,
97 sector_t, int*); 97 sector_t, int*);
98static int t128_detect(struct scsi_host_template *); 98static int t128_detect(struct scsi_host_template *);
99static int t128_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); 99static int t128_queue_command(struct scsi_cmnd *,
100static int t128_bus_reset(Scsi_Cmnd *); 100 void (*done)(struct scsi_cmnd *));
101static int t128_bus_reset(struct scsi_cmnd *);
101 102
102#ifndef CMD_PER_LUN 103#ifndef CMD_PER_LUN
103#define CMD_PER_LUN 2 104#define CMD_PER_LUN 2
104#endif 105#endif
105 106
106#ifndef CAN_QUEUE 107#ifndef CAN_QUEUE
107#define CAN_QUEUE 32 108#define CAN_QUEUE 32
108#endif 109#endif
109 110
110#ifndef HOSTS_C 111#ifndef HOSTS_C
@@ -120,7 +121,7 @@ static int t128_bus_reset(Scsi_Cmnd *);
120 121
121#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20)) 122#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
122 123
123#if !(TDEBUG & TDEBUG_TRANSFER) 124#if !(TDEBUG & TDEBUG_TRANSFER)
124#define NCR5380_read(reg) readb(T128_address(reg)) 125#define NCR5380_read(reg) readb(T128_address(reg))
125#define NCR5380_write(reg, value) writeb((value),(T128_address(reg))) 126#define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
126#else 127#else
@@ -129,7 +130,7 @@ static int t128_bus_reset(Scsi_Cmnd *);
129 , instance->hostno, (reg), T128_address(reg))), readb(T128_address(reg))) 130 , instance->hostno, (reg), T128_address(reg))), readb(T128_address(reg)))
130 131
131#define NCR5380_write(reg, value) { \ 132#define NCR5380_write(reg, value) { \
132 printk("scsi%d : write %02x to register %d at address %08x\n", \ 133 printk("scsi%d : write %02x to register %d at address %08x\n", \
133 instance->hostno, (value), (reg), T128_address(reg)); \ 134 instance->hostno, (value), (reg), T128_address(reg)); \
134 writeb((value), (T128_address(reg))); \ 135 writeb((value), (T128_address(reg))); \
135} 136}
@@ -142,10 +143,10 @@ static int t128_bus_reset(Scsi_Cmnd *);
142#define NCR5380_bus_reset t128_bus_reset 143#define NCR5380_bus_reset t128_bus_reset
143#define NCR5380_proc_info t128_proc_info 144#define NCR5380_proc_info t128_proc_info
144 145
145/* 15 14 12 10 7 5 3 146/* 15 14 12 10 7 5 3
146 1101 0100 1010 1000 */ 147 1101 0100 1010 1000 */
147 148
148#define T128_IRQS 0xc4a8 149#define T128_IRQS 0xc4a8
149 150
150#endif /* else def HOSTS_C */ 151#endif /* else def HOSTS_C */
151#endif /* ndef ASM */ 152#endif /* ndef ASM */
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index aee1b31f1a1c..3db206d29b33 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -60,7 +60,8 @@ struct timer_list mcfrs_timer_struct;
60#if defined(CONFIG_HW_FEITH) 60#if defined(CONFIG_HW_FEITH)
61#define CONSOLE_BAUD_RATE 38400 61#define CONSOLE_BAUD_RATE 38400
62#define DEFAULT_CBAUD B38400 62#define DEFAULT_CBAUD B38400
63#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || defined(CONFIG_M5329EVB) 63#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || \
64 defined(CONFIG_M5329EVB) || defined(CONFIG_GILBARCO)
64#define CONSOLE_BAUD_RATE 115200 65#define CONSOLE_BAUD_RATE 115200
65#define DEFAULT_CBAUD B115200 66#define DEFAULT_CBAUD B115200
66#elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \ 67#elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \
@@ -109,12 +110,30 @@ static struct mcf_serial mcfrs_table[] = {
109 .irq = IRQBASE, 110 .irq = IRQBASE,
110 .flags = ASYNC_BOOT_AUTOCONF, 111 .flags = ASYNC_BOOT_AUTOCONF,
111 }, 112 },
113#ifdef MCFUART_BASE2
112 { /* ttyS1 */ 114 { /* ttyS1 */
113 .magic = 0, 115 .magic = 0,
114 .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE2), 116 .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE2),
115 .irq = IRQBASE+1, 117 .irq = IRQBASE+1,
116 .flags = ASYNC_BOOT_AUTOCONF, 118 .flags = ASYNC_BOOT_AUTOCONF,
117 }, 119 },
120#endif
121#ifdef MCFUART_BASE3
122 { /* ttyS2 */
123 .magic = 0,
124 .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE3),
125 .irq = IRQBASE+2,
126 .flags = ASYNC_BOOT_AUTOCONF,
127 },
128#endif
129#ifdef MCFUART_BASE4
130 { /* ttyS3 */
131 .magic = 0,
132 .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE4),
133 .irq = IRQBASE+3,
134 .flags = ASYNC_BOOT_AUTOCONF,
135 },
136#endif
118}; 137};
119 138
120 139
@@ -1516,6 +1535,22 @@ static void mcfrs_irqinit(struct mcf_serial *info)
1516 imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 + 1535 imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 +
1517 MCFINTC_IMRL); 1536 MCFINTC_IMRL);
1518 *imrp &= ~((1 << (info->irq - MCFINT_VECBASE)) | 1); 1537 *imrp &= ~((1 << (info->irq - MCFINT_VECBASE)) | 1);
1538#if defined(CONFIG_M527x)
1539 {
1540 /*
1541 * External Pin Mask Setting & Enable External Pin for Interface
1542 * mrcbis@aliceposta.it
1543 */
1544 unsigned short *serpin_enable_mask;
1545 serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART);
1546 if (info->line == 0)
1547 *serpin_enable_mask |= UART0_ENABLE_MASK;
1548 else if (info->line == 1)
1549 *serpin_enable_mask |= UART1_ENABLE_MASK;
1550 else if (info->line == 2)
1551 *serpin_enable_mask |= UART2_ENABLE_MASK;
1552 }
1553#endif
1519#elif defined(CONFIG_M520x) 1554#elif defined(CONFIG_M520x)
1520 volatile unsigned char *icrp, *uartp; 1555 volatile unsigned char *icrp, *uartp;
1521 volatile unsigned long *imrp; 1556 volatile unsigned long *imrp;
@@ -1713,7 +1748,7 @@ mcfrs_init(void)
1713 /* Initialize the tty_driver structure */ 1748 /* Initialize the tty_driver structure */
1714 mcfrs_serial_driver->owner = THIS_MODULE; 1749 mcfrs_serial_driver->owner = THIS_MODULE;
1715 mcfrs_serial_driver->name = "ttyS"; 1750 mcfrs_serial_driver->name = "ttyS";
1716 mcfrs_serial_driver->driver_name = "serial"; 1751 mcfrs_serial_driver->driver_name = "mcfserial";
1717 mcfrs_serial_driver->major = TTY_MAJOR; 1752 mcfrs_serial_driver->major = TTY_MAJOR;
1718 mcfrs_serial_driver->minor_start = 64; 1753 mcfrs_serial_driver->minor_start = 64;
1719 mcfrs_serial_driver->type = TTY_DRIVER_TYPE_SERIAL; 1754 mcfrs_serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1797,10 +1832,23 @@ void mcfrs_init_console(void)
1797 uartp[MCFUART_UMR] = MCFUART_MR1_PARITYNONE | MCFUART_MR1_CS8; 1832 uartp[MCFUART_UMR] = MCFUART_MR1_PARITYNONE | MCFUART_MR1_CS8;
1798 uartp[MCFUART_UMR] = MCFUART_MR2_STOP1; 1833 uartp[MCFUART_UMR] = MCFUART_MR2_STOP1;
1799 1834
1835#ifdef CONFIG_M5272
1836{
1837 /*
1838 * For the MCF5272, also compute the baudrate fraction.
1839 */
1840 int fraction = MCF_BUSCLK - (clk * 32 * mcfrs_console_baud);
1841 fraction *= 16;
1842 fraction /= (32 * mcfrs_console_baud);
1843 uartp[MCFUART_UFPD] = (fraction & 0xf); /* set fraction */
1844 clk = (MCF_BUSCLK / mcfrs_console_baud) / 32;
1845}
1846#else
1800 clk = ((MCF_BUSCLK / mcfrs_console_baud) + 16) / 32; /* set baud */ 1847 clk = ((MCF_BUSCLK / mcfrs_console_baud) + 16) / 32; /* set baud */
1848#endif
1849
1801 uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8; /* set msb baud */ 1850 uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8; /* set msb baud */
1802 uartp[MCFUART_UBG2] = (clk & 0xff); /* set lsb baud */ 1851 uartp[MCFUART_UBG2] = (clk & 0xff); /* set lsb baud */
1803
1804 uartp[MCFUART_UCSR] = MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER; 1852 uartp[MCFUART_UCSR] = MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER;
1805 uartp[MCFUART_UCR] = MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE; 1853 uartp[MCFUART_UCR] = MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE;
1806 1854
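
The console-setup hunk programs the ColdFire UART baud-rate generator: the bus clock is divided down to a 16-bit divisor that is split across the UBG1/UBG2 registers, with the MCF5272 additionally writing the remainder into the fractional register UFPD. As a worked example, a 66 MHz bus clock at 115200 baud gives clk = ((66000000 / 115200) + 16) / 32 = 18 = 0x0012, i.e. UBG1 = 0x00 and UBG2 = 0x12. A small sketch of the divisor split (the 66 MHz figure is an assumed value; the register names follow the hunk):

        static void set_console_baud(volatile unsigned char *uartp,
                                     unsigned long busclk, unsigned int baud)
        {
                unsigned int clk = ((busclk / baud) + 16) / 32; /* rounded divisor */

                uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8;      /* divisor msb */
                uartp[MCFUART_UBG2] = (clk & 0x00ff);           /* divisor lsb */
        }
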
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 00f9ffd69489..431433f4dd6d 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -723,7 +723,7 @@ static int serial_config(struct pcmcia_device * link)
723 u_char *buf; 723 u_char *buf;
724 cisparse_t *parse; 724 cisparse_t *parse;
725 cistpl_cftable_entry_t *cf; 725 cistpl_cftable_entry_t *cf;
726 int i, last_ret, last_fn; 726 int i;
727 727
728 DEBUG(0, "serial_config(0x%p)\n", link); 728 DEBUG(0, "serial_config(0x%p)\n", link);
729 729
@@ -740,15 +740,6 @@ static int serial_config(struct pcmcia_device * link)
740 tuple->TupleOffset = 0; 740 tuple->TupleOffset = 0;
741 tuple->TupleDataMax = 255; 741 tuple->TupleDataMax = 255;
742 tuple->Attributes = 0; 742 tuple->Attributes = 0;
743 /* Get configuration register information */
744 tuple->DesiredTuple = CISTPL_CONFIG;
745 last_ret = first_tuple(link, tuple, parse);
746 if (last_ret != CS_SUCCESS) {
747 last_fn = ParseTuple;
748 goto cs_failed;
749 }
750 link->conf.ConfigBase = parse->config.base;
751 link->conf.Present = parse->config.rmask[0];
752 743
753 /* Is this a compliant multifunction card? */ 744 /* Is this a compliant multifunction card? */
754 tuple->DesiredTuple = CISTPL_LONGLINK_MFC; 745 tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
@@ -757,27 +748,25 @@ static int serial_config(struct pcmcia_device * link)
757 748
758 /* Is this a multiport card? */ 749 /* Is this a multiport card? */
759 tuple->DesiredTuple = CISTPL_MANFID; 750 tuple->DesiredTuple = CISTPL_MANFID;
760 if (first_tuple(link, tuple, parse) == CS_SUCCESS) { 751 info->manfid = link->manf_id;
761 info->manfid = parse->manfid.manf; 752 info->prodid = link->card_id;
762 info->prodid = parse->manfid.card; 753
763 754 for (i = 0; i < ARRAY_SIZE(quirks); i++)
764 for (i = 0; i < ARRAY_SIZE(quirks); i++) 755 if ((quirks[i].manfid == ~0 ||
765 if ((quirks[i].manfid == ~0 || 756 quirks[i].manfid == info->manfid) &&
766 quirks[i].manfid == info->manfid) && 757 (quirks[i].prodid == ~0 ||
767 (quirks[i].prodid == ~0 || 758 quirks[i].prodid == info->prodid)) {
768 quirks[i].prodid == info->prodid)) { 759 info->quirk = &quirks[i];
769 info->quirk = &quirks[i]; 760 break;
770 break; 761 }
771 }
772 }
773 762
774 /* Another check for dual-serial cards: look for either serial or 763 /* Another check for dual-serial cards: look for either serial or
775 multifunction cards that ask for appropriate IO port ranges */ 764 multifunction cards that ask for appropriate IO port ranges */
776 tuple->DesiredTuple = CISTPL_FUNCID; 765 tuple->DesiredTuple = CISTPL_FUNCID;
777 if ((info->multi == 0) && 766 if ((info->multi == 0) &&
778 ((first_tuple(link, tuple, parse) != CS_SUCCESS) || 767 (link->has_func_id) &&
779 (parse->funcid.func == CISTPL_FUNCID_MULTI) || 768 ((link->func_id == CISTPL_FUNCID_MULTI) ||
780 (parse->funcid.func == CISTPL_FUNCID_SERIAL))) { 769 (link->func_id == CISTPL_FUNCID_SERIAL))) {
781 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; 770 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
782 if (first_tuple(link, tuple, parse) == CS_SUCCESS) { 771 if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
783 if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0)) 772 if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
@@ -814,8 +803,6 @@ static int serial_config(struct pcmcia_device * link)
814 kfree(cfg_mem); 803 kfree(cfg_mem);
815 return 0; 804 return 0;
816 805
817 cs_failed:
818 cs_error(link, last_fn, last_ret);
819 failed: 806 failed:
820 serial_remove(link); 807 serial_remove(link);
821 kfree(cfg_mem); 808 kfree(cfg_mem);
@@ -925,6 +912,30 @@ static struct pcmcia_device_id serial_ids[] = {
925 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), 912 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
926 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "RS-COM-2P.cis"), 913 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "RS-COM-2P.cis"),
927 PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), 914 PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
915 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
916 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
917 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490),
918 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232",0x19ca78af,0xb6bc0235),
919 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232",0x63f2e0bd,0xb9e175d3),
920 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232-5",0x63f2e0bd,0xfce33442),
921 PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232",0x3beb8cf2,0x171e7190),
922 PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232-5",0x3beb8cf2,0x20da4262),
923 PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF428",0x3beb8cf2,0xea5dd57d),
924 PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF500",0x3beb8cf2,0xd77255fa),
925 PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: IC232",0x3beb8cf2,0x6a709903),
926 PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: SL232",0x3beb8cf2,0x18430676),
927 PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: XL232",0x3beb8cf2,0x6f933767),
928 PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
929 PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
930 PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
931 PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
932 PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
933 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
934 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
935 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
936 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
937 PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
938 PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
928 /* too generic */ 939 /* too generic */
929 /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */ 940 /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */
930 /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */ 941 /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */
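
With the CISTPL_CONFIG and CISTPL_MANFID tuple parsing gone, serial_config() takes the manufacturer and card IDs pre-parsed from the PCMCIA core (link->manf_id, link->card_id) and scans the quirk table, where ~0 acts as a wildcard for either field. A standalone sketch of that wildcard-match idiom (the structure and table contents are invented; only the matching logic mirrors the hunk):

        #include <stddef.h>

        struct serial_quirk {
                unsigned int manfid;    /* ~0 matches any manufacturer */
                unsigned int prodid;    /* ~0 matches any product */
                int multi;              /* quirk payload, purely illustrative */
        };

        static const struct serial_quirk quirks[] = {
                { 0x0101, ~0u,    2 },  /* made-up entry */
                { ~0u,    0x0202, 4 },  /* made-up entry */
        };

        static const struct serial_quirk *find_quirk(unsigned int manfid,
                                                     unsigned int prodid)
        {
                size_t i;

                for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                        if ((quirks[i].manfid == ~0u || quirks[i].manfid == manfid) &&
                            (quirks[i].prodid == ~0u || quirks[i].prodid == prodid))
                                return &quirks[i];
                return NULL;
        }
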
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 72025df5561d..494d9b856488 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -148,7 +148,7 @@ struct chip_data {
148 void (*cs_control)(u32 command); 148 void (*cs_control)(u32 command);
149}; 149};
150 150
151static void pump_messages(void *data); 151static void pump_messages(struct work_struct *work);
152 152
153static int flush(struct driver_data *drv_data) 153static int flush(struct driver_data *drv_data)
154{ 154{
@@ -884,9 +884,10 @@ static void pump_transfers(unsigned long data)
884 } 884 }
885} 885}
886 886
887static void pump_messages(void *data) 887static void pump_messages(struct work_struct *work)
888{ 888{
889 struct driver_data *drv_data = data; 889 struct driver_data *drv_data =
890 container_of(work, struct driver_data, pump_messages);
890 unsigned long flags; 891 unsigned long flags;
891 892
892 /* Lock queue and check for queue work */ 893 /* Lock queue and check for queue work */
@@ -1098,7 +1099,7 @@ static int init_queue(struct driver_data *drv_data)
1098 tasklet_init(&drv_data->pump_transfers, 1099 tasklet_init(&drv_data->pump_transfers,
1099 pump_transfers, (unsigned long)drv_data); 1100 pump_transfers, (unsigned long)drv_data);
1100 1101
1101 INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data); 1102 INIT_WORK(&drv_data->pump_messages, pump_messages);
1102 drv_data->workqueue = create_singlethread_workqueue( 1103 drv_data->workqueue = create_singlethread_workqueue(
1103 drv_data->master->cdev.dev->bus_id); 1104 drv_data->master->cdev.dev->bus_id);
1104 if (drv_data->workqueue == NULL) 1105 if (drv_data->workqueue == NULL)
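
This and several drivers below are converted to the reworked workqueue API, in which INIT_WORK() no longer takes a data pointer: the callback receives the struct work_struct itself and recovers its containing object with container_of(). A minimal sketch of the pattern with invented names (only the INIT_WORK()/container_of() usage mirrors the hunks):

        #include <linux/workqueue.h>
        #include <linux/slab.h>

        struct my_driver_data {
                int pending;
                struct work_struct pump_messages;       /* embedded, no back-pointer needed */
        };

        static void my_pump_messages(struct work_struct *work)
        {
                /* walk back from the embedded member to the enclosing object */
                struct my_driver_data *drv_data =
                        container_of(work, struct my_driver_data, pump_messages);

                drv_data->pending = 0;
        }

        static struct my_driver_data *my_setup(void)
        {
                struct my_driver_data *drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);

                if (drv_data)
                        INIT_WORK(&drv_data->pump_messages, my_pump_messages);
                return drv_data;
        }
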
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index a23862ef72b2..08c1c57c6128 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -265,9 +265,10 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
265 * Drivers can provide word-at-a-time i/o primitives, or provide 265 * Drivers can provide word-at-a-time i/o primitives, or provide
266 * transfer-at-a-time ones to leverage dma or fifo hardware. 266 * transfer-at-a-time ones to leverage dma or fifo hardware.
267 */ 267 */
268static void bitbang_work(void *_bitbang) 268static void bitbang_work(struct work_struct *work)
269{ 269{
270 struct spi_bitbang *bitbang = _bitbang; 270 struct spi_bitbang *bitbang =
271 container_of(work, struct spi_bitbang, work);
271 unsigned long flags; 272 unsigned long flags;
272 273
273 spin_lock_irqsave(&bitbang->lock, flags); 274 spin_lock_irqsave(&bitbang->lock, flags);
@@ -456,7 +457,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
456 if (!bitbang->master || !bitbang->chipselect) 457 if (!bitbang->master || !bitbang->chipselect)
457 return -EINVAL; 458 return -EINVAL;
458 459
459 INIT_WORK(&bitbang->work, bitbang_work, bitbang); 460 INIT_WORK(&bitbang->work, bitbang_work);
460 spin_lock_init(&bitbang->lock); 461 spin_lock_init(&bitbang->lock);
461 INIT_LIST_HEAD(&bitbang->queue); 462 INIT_LIST_HEAD(&bitbang->queue);
462 463
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index dda0ca45d904..164a5dcf1f1e 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -69,25 +69,21 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
69 69
70static void ixj_get_serial(struct pcmcia_device * link, IXJ * j) 70static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
71{ 71{
72 tuple_t tuple;
73 u_short buf[128];
74 char *str; 72 char *str;
75 int last_ret, last_fn, i, place; 73 int i, place;
76 DEBUG(0, "ixj_get_serial(0x%p)\n", link); 74 DEBUG(0, "ixj_get_serial(0x%p)\n", link);
77 tuple.TupleData = (cisdata_t *) buf; 75
78 tuple.TupleOffset = 0; 76 str = link->prod_id[0];
79 tuple.TupleDataMax = 80; 77 if (!str)
80 tuple.Attributes = 0; 78 goto cs_failed;
81 tuple.DesiredTuple = CISTPL_VERS_1;
82 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
83 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
84 str = (char *) buf;
85 printk("PCMCIA Version %d.%d\n", str[0], str[1]);
86 str += 2;
87 printk("%s", str); 79 printk("%s", str);
88 str = str + strlen(str) + 1; 80 str = link->prod_id[1];
81 if (!str)
82 goto cs_failed;
89 printk(" %s", str); 83 printk(" %s", str);
90 str = str + strlen(str) + 1; 84 str = link->prod_id[2];
85 if (!str)
86 goto cs_failed;
91 place = 1; 87 place = 1;
92 for (i = strlen(str) - 1; i >= 0; i--) { 88 for (i = strlen(str) - 1; i >= 0; i--) {
93 switch (str[i]) { 89 switch (str[i]) {
@@ -122,7 +118,9 @@ static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
122 } 118 }
123 place = place * 0x10; 119 place = place * 0x10;
124 } 120 }
125 str = str + strlen(str) + 1; 121 str = link->prod_id[3];
122 if (!str)
123 goto cs_failed;
126 printk(" version %s\n", str); 124 printk(" version %s\n", str);
127 cs_failed: 125 cs_failed:
128 return; 126 return;
@@ -146,13 +144,6 @@ static int ixj_config(struct pcmcia_device * link)
146 tuple.TupleData = (cisdata_t *) buf; 144 tuple.TupleData = (cisdata_t *) buf;
147 tuple.TupleOffset = 0; 145 tuple.TupleOffset = 0;
148 tuple.TupleDataMax = 255; 146 tuple.TupleDataMax = 255;
149 tuple.Attributes = 0;
150 tuple.DesiredTuple = CISTPL_CONFIG;
151 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
152 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
153 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
154 link->conf.ConfigBase = parse.config.base;
155 link->conf.Present = parse.config.rmask[0];
156 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 147 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
157 tuple.Attributes = 0; 148 tuple.Attributes = 0;
158 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 149 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
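
ixj_get_serial() no longer walks the CISTPL_VERS_1 tuple by hand; the version and product strings come from the pre-parsed link->prod_id[] array, any entry of which may be NULL. A small sketch of that access pattern (the printing is illustrative; the NULL checks mirror the hunk):

        #include <pcmcia/ds.h>

        static void print_product_strings(struct pcmcia_device *link)
        {
                int i;

                for (i = 0; i < 4; i++) {
                        if (!link->prod_id[i])          /* entries may be absent */
                                continue;
                        printk(KERN_INFO "prod_id[%d]: %s\n", i, link->prod_id[i]);
                }
        }
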
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index e6565633ba0f..3dfa3e40e148 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -158,7 +158,7 @@ struct cxacru_data {
158 const struct cxacru_modem_type *modem_type; 158 const struct cxacru_modem_type *modem_type;
159 159
160 int line_status; 160 int line_status;
161 struct work_struct poll_work; 161 struct delayed_work poll_work;
162 162
163 /* control handles */ 163 /* control handles */

164 struct mutex cm_serialize; 164 struct mutex cm_serialize;
@@ -347,7 +347,7 @@ static int cxacru_card_status(struct cxacru_data *instance)
347 return 0; 347 return 0;
348} 348}
349 349
350static void cxacru_poll_status(struct cxacru_data *instance); 350static void cxacru_poll_status(struct work_struct *work);
351 351
352static int cxacru_atm_start(struct usbatm_data *usbatm_instance, 352static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
353 struct atm_dev *atm_dev) 353 struct atm_dev *atm_dev)
@@ -376,12 +376,14 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
376 } 376 }
377 377
378 /* Start status polling */ 378 /* Start status polling */
379 cxacru_poll_status(instance); 379 cxacru_poll_status(&instance->poll_work.work);
380 return 0; 380 return 0;
381} 381}
382 382
383static void cxacru_poll_status(struct cxacru_data *instance) 383static void cxacru_poll_status(struct work_struct *work)
384{ 384{
385 struct cxacru_data *instance =
386 container_of(work, struct cxacru_data, poll_work.work);
385 u32 buf[CXINF_MAX] = {}; 387 u32 buf[CXINF_MAX] = {};
386 struct usbatm_data *usbatm = instance->usbatm; 388 struct usbatm_data *usbatm = instance->usbatm;
387 struct atm_dev *atm_dev = usbatm->atm_dev; 389 struct atm_dev *atm_dev = usbatm->atm_dev;
@@ -720,7 +722,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
720 722
721 mutex_init(&instance->cm_serialize); 723 mutex_init(&instance->cm_serialize);
722 724
723 INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance); 725 INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);
724 726
725 usbatm_instance->driver_data = instance; 727 usbatm_instance->driver_data = instance;
726 728
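
Drivers that poll periodically move from struct work_struct to struct delayed_work: INIT_DELAYED_WORK() sets the item up, scheduling always takes an explicit delay (0 meaning "run now"), and the handler reaches its container through the embedded .work member. A sketch with invented names (the API usage mirrors the cxacru hunks):

        #include <linux/workqueue.h>

        struct my_modem {
                int line_status;
                struct delayed_work poll_work;  /* was: struct work_struct */
        };

        static void my_poll_status(struct work_struct *work)
        {
                /* note the extra ".work": delayed_work embeds a work_struct */
                struct my_modem *instance =
                        container_of(work, struct my_modem, poll_work.work);

                instance->line_status++;
                schedule_delayed_work(&instance->poll_work, HZ);        /* poll again in 1s */
        }

        static void my_start_polling(struct my_modem *instance)
        {
                INIT_DELAYED_WORK(&instance->poll_work, my_poll_status);
                schedule_delayed_work(&instance->poll_work, 0);         /* run immediately */
        }
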
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index a823486495c3..8ed6c75adf0f 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -142,7 +142,7 @@ struct speedtch_instance_data {
142 142
143 struct speedtch_params params; /* set in probe, constant afterwards */ 143 struct speedtch_params params; /* set in probe, constant afterwards */
144 144
145 struct work_struct status_checker; 145 struct delayed_work status_checker;
146 146
147 unsigned char last_status; 147 unsigned char last_status;
148 148
@@ -498,8 +498,11 @@ static int speedtch_start_synchro(struct speedtch_instance_data *instance)
498 return ret; 498 return ret;
499} 499}
500 500
501static void speedtch_check_status(struct speedtch_instance_data *instance) 501static void speedtch_check_status(struct work_struct *work)
502{ 502{
503 struct speedtch_instance_data *instance =
504 container_of(work, struct speedtch_instance_data,
505 status_checker.work);
503 struct usbatm_data *usbatm = instance->usbatm; 506 struct usbatm_data *usbatm = instance->usbatm;
504 struct atm_dev *atm_dev = usbatm->atm_dev; 507 struct atm_dev *atm_dev = usbatm->atm_dev;
505 unsigned char *buf = instance->scratch_buffer; 508 unsigned char *buf = instance->scratch_buffer;
@@ -576,7 +579,7 @@ static void speedtch_status_poll(unsigned long data)
576{ 579{
577 struct speedtch_instance_data *instance = (void *)data; 580 struct speedtch_instance_data *instance = (void *)data;
578 581
579 schedule_work(&instance->status_checker); 582 schedule_delayed_work(&instance->status_checker, 0);
580 583
581 /* The following check is racy, but the race is harmless */ 584 /* The following check is racy, but the race is harmless */
582 if (instance->poll_delay < MAX_POLL_DELAY) 585 if (instance->poll_delay < MAX_POLL_DELAY)
@@ -596,7 +599,7 @@ static void speedtch_resubmit_int(unsigned long data)
596 if (int_urb) { 599 if (int_urb) {
597 ret = usb_submit_urb(int_urb, GFP_ATOMIC); 600 ret = usb_submit_urb(int_urb, GFP_ATOMIC);
598 if (!ret) 601 if (!ret)
599 schedule_work(&instance->status_checker); 602 schedule_delayed_work(&instance->status_checker, 0);
600 else { 603 else {
601 atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); 604 atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
602 mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); 605 mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@@ -640,7 +643,7 @@ static void speedtch_handle_int(struct urb *int_urb)
640 643
641 if ((int_urb = instance->int_urb)) { 644 if ((int_urb = instance->int_urb)) {
642 ret = usb_submit_urb(int_urb, GFP_ATOMIC); 645 ret = usb_submit_urb(int_urb, GFP_ATOMIC);
643 schedule_work(&instance->status_checker); 646 schedule_delayed_work(&instance->status_checker, 0);
644 if (ret < 0) { 647 if (ret < 0) {
645 atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); 648 atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
646 goto fail; 649 goto fail;
@@ -855,7 +858,7 @@ static int speedtch_bind(struct usbatm_data *usbatm,
855 858
856 usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0); 859 usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
857 860
858 INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance); 861 INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
859 862
860 instance->status_checker.timer.function = speedtch_status_poll; 863 instance->status_checker.timer.function = speedtch_status_poll;
861 instance->status_checker.timer.data = (unsigned long)instance; 864 instance->status_checker.timer.data = (unsigned long)instance;
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index c137c041f7a4..f2d196fa1e8b 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -655,9 +655,9 @@ static int request_dsp(struct uea_softc *sc)
655/* 655/*
656 * The uea_load_page() function must be called within a process context 656 * The uea_load_page() function must be called within a process context
657 */ 657 */
658static void uea_load_page(void *xsc) 658static void uea_load_page(struct work_struct *work)
659{ 659{
660 struct uea_softc *sc = xsc; 660 struct uea_softc *sc = container_of(work, struct uea_softc, task);
661 u16 pageno = sc->pageno; 661 u16 pageno = sc->pageno;
662 u16 ovl = sc->ovl; 662 u16 ovl = sc->ovl;
663 struct block_info bi; 663 struct block_info bi;
@@ -1348,7 +1348,7 @@ static int uea_boot(struct uea_softc *sc)
1348 1348
1349 uea_enters(INS_TO_USBDEV(sc)); 1349 uea_enters(INS_TO_USBDEV(sc));
1350 1350
1351 INIT_WORK(&sc->task, uea_load_page, sc); 1351 INIT_WORK(&sc->task, uea_load_page);
1352 init_waitqueue_head(&sc->sync_q); 1352 init_waitqueue_head(&sc->sync_q);
1353 init_waitqueue_head(&sc->cmv_ack_wait); 1353 init_waitqueue_head(&sc->cmv_ack_wait);
1354 1354
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ec3438dc8ee5..7f1fa956dcdb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -421,9 +421,9 @@ static void acm_write_bulk(struct urb *urb)
421 schedule_work(&acm->work); 421 schedule_work(&acm->work);
422} 422}
423 423
424static void acm_softint(void *private) 424static void acm_softint(struct work_struct *work)
425{ 425{
426 struct acm *acm = private; 426 struct acm *acm = container_of(work, struct acm, work);
427 dbg("Entering acm_softint."); 427 dbg("Entering acm_softint.");
428 428
429 if (!ACM_READY(acm)) 429 if (!ACM_READY(acm))
@@ -927,7 +927,7 @@ skip_normal_probe:
927 acm->rx_buflimit = num_rx_buf; 927 acm->rx_buflimit = num_rx_buf;
928 acm->urb_task.func = acm_rx_tasklet; 928 acm->urb_task.func = acm_rx_tasklet;
929 acm->urb_task.data = (unsigned long) acm; 929 acm->urb_task.data = (unsigned long) acm;
930 INIT_WORK(&acm->work, acm_softint, acm); 930 INIT_WORK(&acm->work, acm_softint);
931 spin_lock_init(&acm->throttle_lock); 931 spin_lock_init(&acm->throttle_lock);
932 spin_lock_init(&acm->write_lock); 932 spin_lock_init(&acm->write_lock);
933 spin_lock_init(&acm->read_lock); 933 spin_lock_init(&acm->read_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0ce393eb3c4b..9be41ed1f9a6 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -68,7 +68,7 @@ struct usb_hub {
68 68
69 unsigned has_indicators:1; 69 unsigned has_indicators:1;
70 u8 indicator[USB_MAXCHILDREN]; 70 u8 indicator[USB_MAXCHILDREN];
71 struct work_struct leds; 71 struct delayed_work leds;
72}; 72};
73 73
74 74
@@ -218,9 +218,10 @@ static void set_port_led(
218 218
219#define LED_CYCLE_PERIOD ((2*HZ)/3) 219#define LED_CYCLE_PERIOD ((2*HZ)/3)
220 220
221static void led_work (void *__hub) 221static void led_work (struct work_struct *work)
222{ 222{
223 struct usb_hub *hub = __hub; 223 struct usb_hub *hub =
224 container_of(work, struct usb_hub, leds.work);
224 struct usb_device *hdev = hub->hdev; 225 struct usb_device *hdev = hub->hdev;
225 unsigned i; 226 unsigned i;
226 unsigned changed = 0; 227 unsigned changed = 0;
@@ -405,9 +406,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
405 * talking to TTs must queue control transfers (not just bulk and iso), so 406 * talking to TTs must queue control transfers (not just bulk and iso), so
406 * both can talk to the same hub concurrently. 407 * both can talk to the same hub concurrently.
407 */ 408 */
408static void hub_tt_kevent (void *arg) 409static void hub_tt_kevent (struct work_struct *work)
409{ 410{
410 struct usb_hub *hub = arg; 411 struct usb_hub *hub =
412 container_of(work, struct usb_hub, tt.kevent);
411 unsigned long flags; 413 unsigned long flags;
412 414
413 spin_lock_irqsave (&hub->tt.lock, flags); 415 spin_lock_irqsave (&hub->tt.lock, flags);
@@ -694,7 +696,7 @@ static int hub_configure(struct usb_hub *hub,
694 696
695 spin_lock_init (&hub->tt.lock); 697 spin_lock_init (&hub->tt.lock);
696 INIT_LIST_HEAD (&hub->tt.clear_list); 698 INIT_LIST_HEAD (&hub->tt.clear_list);
697 INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub); 699 INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
698 switch (hdev->descriptor.bDeviceProtocol) { 700 switch (hdev->descriptor.bDeviceProtocol) {
699 case 0: 701 case 0:
700 break; 702 break;
@@ -938,7 +940,7 @@ descriptor_error:
938 INIT_LIST_HEAD(&hub->event_list); 940 INIT_LIST_HEAD(&hub->event_list);
939 hub->intfdev = &intf->dev; 941 hub->intfdev = &intf->dev;
940 hub->hdev = hdev; 942 hub->hdev = hdev;
941 INIT_WORK(&hub->leds, led_work, hub); 943 INIT_DELAYED_WORK(&hub->leds, led_work);
942 944
943 usb_set_intfdata (intf, hub); 945 usb_set_intfdata (intf, hub);
944 intf->needs_remote_wakeup = 1; 946 intf->needs_remote_wakeup = 1;
@@ -2381,7 +2383,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
2381 /* hub LEDs are probably harder to miss than syslog */ 2383 /* hub LEDs are probably harder to miss than syslog */
2382 if (hub->has_indicators) { 2384 if (hub->has_indicators) {
2383 hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; 2385 hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
2384 schedule_work (&hub->leds); 2386 schedule_delayed_work (&hub->leds, 0);
2385 } 2387 }
2386 } 2388 }
2387 kfree(qual); 2389 kfree(qual);
@@ -2555,7 +2557,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2555 if (hub->has_indicators) { 2557 if (hub->has_indicators) {
2556 hub->indicator[port1-1] = 2558 hub->indicator[port1-1] =
2557 INDICATOR_AMBER_BLINK; 2559 INDICATOR_AMBER_BLINK;
2558 schedule_work (&hub->leds); 2560 schedule_delayed_work (&hub->leds, 0);
2559 } 2561 }
2560 status = -ENOTCONN; /* Don't retry */ 2562 status = -ENOTCONN; /* Don't retry */
2561 goto loop_disable; 2563 goto loop_disable;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 29b0fa9ff9d0..7390b67c609d 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1501,9 +1501,10 @@ struct set_config_request {
1501}; 1501};
1502 1502
1503/* Worker routine for usb_driver_set_configuration() */ 1503/* Worker routine for usb_driver_set_configuration() */
1504static void driver_set_config_work(void *_req) 1504static void driver_set_config_work(struct work_struct *work)
1505{ 1505{
1506 struct set_config_request *req = _req; 1506 struct set_config_request *req =
1507 container_of(work, struct set_config_request, work);
1507 1508
1508 usb_lock_device(req->udev); 1509 usb_lock_device(req->udev);
1509 usb_set_configuration(req->udev, req->config); 1510 usb_set_configuration(req->udev, req->config);
@@ -1541,7 +1542,7 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
1541 return -ENOMEM; 1542 return -ENOMEM;
1542 req->udev = udev; 1543 req->udev = udev;
1543 req->config = config; 1544 req->config = config;
1544 INIT_WORK(&req->work, driver_set_config_work, req); 1545 INIT_WORK(&req->work, driver_set_config_work);
1545 1546
1546 usb_get_dev(udev); 1547 usb_get_dev(udev);
1547 if (!schedule_work(&req->work)) { 1548 if (!schedule_work(&req->work)) {
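
usb_driver_set_configuration() shows the same conversion for a work item that is allocated per request rather than embedded in a long-lived object: the handler recovers the request with container_of() and then owns its lifetime. A sketch under that assumption (invented names; freeing inside the handler is an assumption about the surrounding code, not part of the hunk):

        #include <linux/workqueue.h>
        #include <linux/slab.h>

        struct one_shot_request {
                int config;
                struct work_struct work;
        };

        static void one_shot_handler(struct work_struct *work)
        {
                struct one_shot_request *req =
                        container_of(work, struct one_shot_request, work);

                /* ... act on req->config ... */
                kfree(req);                     /* the request owns itself once queued */
        }

        static int queue_one_shot(int config)
        {
                struct one_shot_request *req = kmalloc(sizeof(*req), GFP_KERNEL);

                if (!req)
                        return -ENOMEM;
                req->config = config;
                INIT_WORK(&req->work, one_shot_handler);
                schedule_work(&req->work);
                return 0;
        }
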
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 81cb52564e68..02426d0b9a34 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -203,9 +203,10 @@ static void ksuspend_usb_cleanup(void)
203#ifdef CONFIG_USB_SUSPEND 203#ifdef CONFIG_USB_SUSPEND
204 204
205/* usb_autosuspend_work - callback routine to autosuspend a USB device */ 205/* usb_autosuspend_work - callback routine to autosuspend a USB device */
206static void usb_autosuspend_work(void *_udev) 206static void usb_autosuspend_work(struct work_struct *work)
207{ 207{
208 struct usb_device *udev = _udev; 208 struct usb_device *udev =
209 container_of(work, struct usb_device, autosuspend.work);
209 210
210 usb_pm_lock(udev); 211 usb_pm_lock(udev);
211 udev->auto_pm = 1; 212 udev->auto_pm = 1;
@@ -215,7 +216,7 @@ static void usb_autosuspend_work(void *_udev)
215 216
216#else 217#else
217 218
218static void usb_autosuspend_work(void *_udev) 219static void usb_autosuspend_work(struct work_struct *work)
219{} 220{}
220 221
221#endif /* CONFIG_USB_SUSPEND */ 222#endif /* CONFIG_USB_SUSPEND */
@@ -304,7 +305,7 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
304 305
305#ifdef CONFIG_PM 306#ifdef CONFIG_PM
306 mutex_init(&dev->pm_mutex); 307 mutex_init(&dev->pm_mutex);
307 INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev); 308 INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
308#endif 309#endif
309 return dev; 310 return dev;
310} 311}
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 3bd1dfe565c1..d15bf22b9a03 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -1833,9 +1833,9 @@ static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
1833 spin_unlock_irqrestore(&dev->req_lock, flags); 1833 spin_unlock_irqrestore(&dev->req_lock, flags);
1834} 1834}
1835 1835
1836static void eth_work (void *_dev) 1836static void eth_work (struct work_struct *work)
1837{ 1837{
1838 struct eth_dev *dev = _dev; 1838 struct eth_dev *dev = container_of(work, struct eth_dev, work);
1839 1839
1840 if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) { 1840 if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
1841 if (netif_running (dev->net)) 1841 if (netif_running (dev->net))
@@ -2398,7 +2398,7 @@ autoconf_fail:
2398 dev = netdev_priv(net); 2398 dev = netdev_priv(net);
2399 spin_lock_init (&dev->lock); 2399 spin_lock_init (&dev->lock);
2400 spin_lock_init (&dev->req_lock); 2400 spin_lock_init (&dev->req_lock);
2401 INIT_WORK (&dev->work, eth_work, dev); 2401 INIT_WORK (&dev->work, eth_work);
2402 INIT_LIST_HEAD (&dev->tx_reqs); 2402 INIT_LIST_HEAD (&dev->tx_reqs);
2403 INIT_LIST_HEAD (&dev->rx_reqs); 2403 INIT_LIST_HEAD (&dev->rx_reqs);
2404 2404
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 54f554e0f0ad..ac9f11d19817 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -169,21 +169,14 @@ static int sl811_cs_config(struct pcmcia_device *link)
169 169
170 DBG(0, "sl811_cs_config(0x%p)\n", link); 170 DBG(0, "sl811_cs_config(0x%p)\n", link);
171 171
172 tuple.DesiredTuple = CISTPL_CONFIG;
173 tuple.Attributes = 0;
174 tuple.TupleData = buf;
175 tuple.TupleDataMax = sizeof(buf);
176 tuple.TupleOffset = 0;
177 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
178 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
179 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
180 link->conf.ConfigBase = parse.config.base;
181 link->conf.Present = parse.config.rmask[0];
182
183 /* Look up the current Vcc */ 172 /* Look up the current Vcc */
184 CS_CHECK(GetConfigurationInfo, 173 CS_CHECK(GetConfigurationInfo,
185 pcmcia_get_configuration_info(link, &conf)); 174 pcmcia_get_configuration_info(link, &conf));
186 175
176 tuple.Attributes = 0;
177 tuple.TupleData = buf;
178 tuple.TupleDataMax = sizeof(buf);
179 tuple.TupleOffset = 0;
187 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 180 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
188 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 181 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
189 while (1) { 182 while (1) {
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ef54e310bfc4..a9d7119e3176 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -163,7 +163,7 @@ struct u132_endp {
163 u16 queue_next; 163 u16 queue_next;
164 struct urb *urb_list[ENDP_QUEUE_SIZE]; 164 struct urb *urb_list[ENDP_QUEUE_SIZE];
165 struct list_head urb_more; 165 struct list_head urb_more;
166 struct work_struct scheduler; 166 struct delayed_work scheduler;
167}; 167};
168struct u132_ring { 168struct u132_ring {
169 unsigned in_use:1; 169 unsigned in_use:1;
@@ -171,7 +171,7 @@ struct u132_ring {
171 u8 number; 171 u8 number;
172 struct u132 *u132; 172 struct u132 *u132;
173 struct u132_endp *curr_endp; 173 struct u132_endp *curr_endp;
174 struct work_struct scheduler; 174 struct delayed_work scheduler;
175}; 175};
176#define OHCI_QUIRK_AMD756 0x01 176#define OHCI_QUIRK_AMD756 0x01
177#define OHCI_QUIRK_SUPERIO 0x02 177#define OHCI_QUIRK_SUPERIO 0x02
@@ -198,7 +198,7 @@ struct u132 {
198 u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; 198 u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
199 int flags; 199 int flags;
200 unsigned long next_statechange; 200 unsigned long next_statechange;
201 struct work_struct monitor; 201 struct delayed_work monitor;
202 int num_endpoints; 202 int num_endpoints;
203 struct u132_addr addr[MAX_U132_ADDRS]; 203 struct u132_addr addr[MAX_U132_ADDRS];
204 struct u132_udev udev[MAX_U132_UDEVS]; 204 struct u132_udev udev[MAX_U132_UDEVS];
@@ -310,7 +310,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
310 if (delta > 0) { 310 if (delta > 0) {
311 if (queue_delayed_work(workqueue, &ring->scheduler, delta)) 311 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
312 return; 312 return;
313 } else if (queue_work(workqueue, &ring->scheduler)) 313 } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
314 return; 314 return;
315 kref_put(&u132->kref, u132_hcd_delete); 315 kref_put(&u132->kref, u132_hcd_delete);
316 return; 316 return;
@@ -389,12 +389,8 @@ static inline void u132_endp_init_kref(struct u132 *u132,
389static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, 389static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
390 unsigned int delta) 390 unsigned int delta)
391{ 391{
392 if (delta > 0) { 392 if (queue_delayed_work(workqueue, &endp->scheduler, delta))
393 if (queue_delayed_work(workqueue, &endp->scheduler, delta)) 393 kref_get(&endp->kref);
394 kref_get(&endp->kref);
395 } else if (queue_work(workqueue, &endp->scheduler))
396 kref_get(&endp->kref);
397 return;
398} 394}
399 395
400static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) 396static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -410,24 +406,14 @@ static inline void u132_monitor_put_kref(struct u132 *u132)
410 406
411static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) 407static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
412{ 408{
413 if (delta > 0) { 409 if (queue_delayed_work(workqueue, &u132->monitor, delta))
414 if (queue_delayed_work(workqueue, &u132->monitor, delta)) { 410 kref_get(&u132->kref);
415 kref_get(&u132->kref);
416 }
417 } else if (queue_work(workqueue, &u132->monitor))
418 kref_get(&u132->kref);
419 return;
420} 411}
421 412
422static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) 413static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
423{ 414{
424 if (delta > 0) { 415 if (!queue_delayed_work(workqueue, &u132->monitor, delta))
425 if (queue_delayed_work(workqueue, &u132->monitor, delta)) 416 kref_put(&u132->kref, u132_hcd_delete);
426 return;
427 } else if (queue_work(workqueue, &u132->monitor))
428 return;
429 kref_put(&u132->kref, u132_hcd_delete);
430 return;
431} 417}
432 418
433static void u132_monitor_cancel_work(struct u132 *u132) 419static void u132_monitor_cancel_work(struct u132 *u132)
@@ -489,9 +475,9 @@ static int read_roothub_info(struct u132 *u132)
489 return 0; 475 return 0;
490} 476}
491 477
492static void u132_hcd_monitor_work(void *data) 478static void u132_hcd_monitor_work(struct work_struct *work)
493{ 479{
494 struct u132 *u132 = data; 480 struct u132 *u132 = container_of(work, struct u132, monitor.work);
495 if (u132->going > 1) { 481 if (u132->going > 1) {
496 dev_err(&u132->platform_dev->dev, "device has been removed %d\n" 482 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
497 , u132->going); 483 , u132->going);
@@ -1315,15 +1301,14 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
1315 } 1301 }
1316} 1302}
1317 1303
1318static void u132_hcd_ring_work_scheduler(void *data);
1319static void u132_hcd_endp_work_scheduler(void *data);
1320/* 1304/*
1321* this work function is only executed from the work queue 1305* this work function is only executed from the work queue
1322* 1306*
1323*/ 1307*/
1324static void u132_hcd_ring_work_scheduler(void *data) 1308static void u132_hcd_ring_work_scheduler(struct work_struct *work)
1325{ 1309{
1326 struct u132_ring *ring = data; 1310 struct u132_ring *ring =
1311 container_of(work, struct u132_ring, scheduler.work);
1327 struct u132 *u132 = ring->u132; 1312 struct u132 *u132 = ring->u132;
1328 down(&u132->scheduler_lock); 1313 down(&u132->scheduler_lock);
1329 if (ring->in_use) { 1314 if (ring->in_use) {
@@ -1382,10 +1367,11 @@ static void u132_hcd_ring_work_scheduler(void *data)
1382 } 1367 }
1383} 1368}
1384 1369
1385static void u132_hcd_endp_work_scheduler(void *data) 1370static void u132_hcd_endp_work_scheduler(struct work_struct *work)
1386{ 1371{
1387 struct u132_ring *ring; 1372 struct u132_ring *ring;
1388 struct u132_endp *endp = data; 1373 struct u132_endp *endp =
1374 container_of(work, struct u132_endp, scheduler.work);
1389 struct u132 *u132 = endp->u132; 1375 struct u132 *u132 = endp->u132;
1390 down(&u132->scheduler_lock); 1376 down(&u132->scheduler_lock);
1391 ring = endp->ring; 1377 ring = endp->ring;
@@ -1943,7 +1929,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
1943 if (!endp) { 1929 if (!endp) {
1944 return -ENOMEM; 1930 return -ENOMEM;
1945 } 1931 }
1946 INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 1932 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
1947 spin_lock_init(&endp->queue_lock.slock); 1933 spin_lock_init(&endp->queue_lock.slock);
1948 INIT_LIST_HEAD(&endp->urb_more); 1934 INIT_LIST_HEAD(&endp->urb_more);
1949 ring = endp->ring = &u132->ring[0]; 1935 ring = endp->ring = &u132->ring[0];
@@ -2032,7 +2018,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
2032 if (!endp) { 2018 if (!endp) {
2033 return -ENOMEM; 2019 return -ENOMEM;
2034 } 2020 }
2035 INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 2021 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
2036 spin_lock_init(&endp->queue_lock.slock); 2022 spin_lock_init(&endp->queue_lock.slock);
2037 INIT_LIST_HEAD(&endp->urb_more); 2023 INIT_LIST_HEAD(&endp->urb_more);
2038 endp->dequeueing = 0; 2024 endp->dequeueing = 0;
@@ -2117,7 +2103,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
2117 if (!endp) { 2103 if (!endp) {
2118 return -ENOMEM; 2104 return -ENOMEM;
2119 } 2105 }
2120 INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 2106 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
2121 spin_lock_init(&endp->queue_lock.slock); 2107 spin_lock_init(&endp->queue_lock.slock);
2122 INIT_LIST_HEAD(&endp->urb_more); 2108 INIT_LIST_HEAD(&endp->urb_more);
2123 ring = endp->ring = &u132->ring[0]; 2109 ring = endp->ring = &u132->ring[0];
@@ -3096,10 +3082,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
3096 ring->number = rings + 1; 3082 ring->number = rings + 1;
3097 ring->length = 0; 3083 ring->length = 0;
3098 ring->curr_endp = NULL; 3084 ring->curr_endp = NULL;
3099 INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler, 3085 INIT_DELAYED_WORK(&ring->scheduler,
3100 (void *)ring); 3086 u132_hcd_ring_work_scheduler);
3101 } down(&u132->sw_lock); 3087 } down(&u132->sw_lock);
3102 INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132); 3088 INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
3103 while (ports-- > 0) { 3089 while (ports-- > 0) {
3104 struct u132_port *port = &u132->port[ports]; 3090 struct u132_port *port = &u132->port[ports];
3105 port->u132 = u132; 3091 port->u132 = u132;
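
The queue/requeue helpers above collapse to single queue_delayed_work() calls because a zero delay now means "queue immediately"; the return value still drives the kref accounting, since queue_delayed_work() returns nonzero only when the work was not already pending. A sketch of that reference-counting convention with invented names (the logic mirrors u132_monitor_queue_work()/u132_monitor_requeue_work()):

        #include <linux/workqueue.h>
        #include <linux/kref.h>
        #include <linux/slab.h>

        struct my_dev {
                struct kref kref;
                struct delayed_work monitor;
        };

        static struct workqueue_struct *my_wq;

        static void my_dev_delete(struct kref *kref)
        {
                kfree(container_of(kref, struct my_dev, kref));
        }

        static void my_queue_monitor(struct my_dev *dev, unsigned int delta)
        {
                if (queue_delayed_work(my_wq, &dev->monitor, delta))
                        kref_get(&dev->kref);   /* pending work holds a reference */
        }

        static void my_requeue_monitor(struct my_dev *dev, unsigned int delta)
        {
                /* the work handler calling this already holds the work's reference;
                 * drop it only if the work could not be re-queued */
                if (!queue_delayed_work(my_wq, &dev->monitor, delta))
                        kref_put(&dev->kref, my_dev_delete);
        }
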
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index a49644b7c58e..4295bab4f1e2 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -969,9 +969,10 @@ static void hid_retry_timeout(unsigned long _hid)
969} 969}
970 970
971/* Workqueue routine to reset the device or clear a halt */ 971/* Workqueue routine to reset the device or clear a halt */
972static void hid_reset(void *_hid) 972static void hid_reset(struct work_struct *work)
973{ 973{
974 struct hid_device *hid = (struct hid_device *) _hid; 974 struct hid_device *hid =
975 container_of(work, struct hid_device, reset_work);
975 int rc_lock, rc = 0; 976 int rc_lock, rc = 0;
976 977
977 if (test_bit(HID_CLEAR_HALT, &hid->iofl)) { 978 if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
@@ -2043,7 +2044,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
2043 2044
2044 init_waitqueue_head(&hid->wait); 2045 init_waitqueue_head(&hid->wait);
2045 2046
2046 INIT_WORK(&hid->reset_work, hid_reset, hid); 2047 INIT_WORK(&hid->reset_work, hid_reset);
2047 setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid); 2048 setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
2048 2049
2049 spin_lock_init(&hid->inlock); 2050 spin_lock_init(&hid->inlock);
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index ba30ca6a14aa..02cbb7fff24f 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -76,7 +76,7 @@ struct appledisplay {
76 char *urbdata; /* interrupt URB data buffer */ 76 char *urbdata; /* interrupt URB data buffer */
77 char *msgdata; /* control message data buffer */ 77 char *msgdata; /* control message data buffer */
78 78
79 struct work_struct work; 79 struct delayed_work work;
80 int button_pressed; 80 int button_pressed;
81 spinlock_t lock; 81 spinlock_t lock;
82}; 82};
@@ -117,7 +117,7 @@ static void appledisplay_complete(struct urb *urb)
117 case ACD_BTN_BRIGHT_UP: 117 case ACD_BTN_BRIGHT_UP:
118 case ACD_BTN_BRIGHT_DOWN: 118 case ACD_BTN_BRIGHT_DOWN:
119 pdata->button_pressed = 1; 119 pdata->button_pressed = 1;
120 queue_work(wq, &pdata->work); 120 queue_delayed_work(wq, &pdata->work, 0);
121 break; 121 break;
122 case ACD_BTN_NONE: 122 case ACD_BTN_NONE:
123 default: 123 default:
@@ -184,9 +184,10 @@ static struct backlight_properties appledisplay_bl_data = {
184 .max_brightness = 0xFF 184 .max_brightness = 0xFF
185}; 185};
186 186
187static void appledisplay_work(void *private) 187static void appledisplay_work(struct work_struct *work)
188{ 188{
189 struct appledisplay *pdata = private; 189 struct appledisplay *pdata =
190 container_of(work, struct appledisplay, work.work);
190 int retval; 191 int retval;
191 192
192 up(&pdata->bd->sem); 193 up(&pdata->bd->sem);
@@ -238,7 +239,7 @@ static int appledisplay_probe(struct usb_interface *iface,
238 pdata->udev = udev; 239 pdata->udev = udev;
239 240
240 spin_lock_init(&pdata->lock); 241 spin_lock_init(&pdata->lock);
241 INIT_WORK(&pdata->work, appledisplay_work, pdata); 242 INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
242 243
243 /* Allocate buffer for control messages */ 244 /* Allocate buffer for control messages */
244 pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL); 245 pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index cb0ba3107d7f..18b1925032a8 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -156,9 +156,9 @@ struct usb_ftdi {
156 struct usb_device *udev; 156 struct usb_device *udev;
157 struct usb_interface *interface; 157 struct usb_interface *interface;
158 struct usb_class_driver *class; 158 struct usb_class_driver *class;
159 struct work_struct status_work; 159 struct delayed_work status_work;
160 struct work_struct command_work; 160 struct delayed_work command_work;
161 struct work_struct respond_work; 161 struct delayed_work respond_work;
162 struct u132_platform_data platform_data; 162 struct u132_platform_data platform_data;
163 struct resource resources[0]; 163 struct resource resources[0];
164 struct platform_device platform_dev; 164 struct platform_device platform_dev;
@@ -210,23 +210,14 @@ static void ftdi_elan_init_kref(struct usb_ftdi *ftdi)
210 210
211static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) 211static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
212{ 212{
213 if (delta > 0) { 213 if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
214 if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) 214 kref_put(&ftdi->kref, ftdi_elan_delete);
215 return;
216 } else if (queue_work(status_queue, &ftdi->status_work))
217 return;
218 kref_put(&ftdi->kref, ftdi_elan_delete);
219 return;
220} 215}
221 216
222static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 217static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
223{ 218{
224 if (delta > 0) { 219 if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
225 if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) 220 kref_get(&ftdi->kref);
226 kref_get(&ftdi->kref);
227 } else if (queue_work(status_queue, &ftdi->status_work))
228 kref_get(&ftdi->kref);
229 return;
230} 221}
231 222
232static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) 223static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
@@ -237,25 +228,14 @@ static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
237 228
238static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) 229static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
239{ 230{
240 if (delta > 0) { 231 if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
241 if (queue_delayed_work(command_queue, &ftdi->command_work, 232 kref_put(&ftdi->kref, ftdi_elan_delete);
242 delta))
243 return;
244 } else if (queue_work(command_queue, &ftdi->command_work))
245 return;
246 kref_put(&ftdi->kref, ftdi_elan_delete);
247 return;
248} 233}
249 234
250static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 235static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
251{ 236{
252 if (delta > 0) { 237 if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
253 if (queue_delayed_work(command_queue, &ftdi->command_work, 238 kref_get(&ftdi->kref);
254 delta))
255 kref_get(&ftdi->kref);
256 } else if (queue_work(command_queue, &ftdi->command_work))
257 kref_get(&ftdi->kref);
258 return;
259} 239}
260 240
261static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) 241static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
@@ -267,25 +247,14 @@ static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
267static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, 247static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
268 unsigned int delta) 248 unsigned int delta)
269{ 249{
270 if (delta > 0) { 250 if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
271 if (queue_delayed_work(respond_queue, &ftdi->respond_work, 251 kref_put(&ftdi->kref, ftdi_elan_delete);
272 delta))
273 return;
274 } else if (queue_work(respond_queue, &ftdi->respond_work))
275 return;
276 kref_put(&ftdi->kref, ftdi_elan_delete);
277 return;
278} 252}
279 253
280static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 254static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
281{ 255{
282 if (delta > 0) { 256 if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
283 if (queue_delayed_work(respond_queue, &ftdi->respond_work, 257 kref_get(&ftdi->kref);
284 delta))
285 kref_get(&ftdi->kref);
286 } else if (queue_work(respond_queue, &ftdi->respond_work))
287 kref_get(&ftdi->kref);
288 return;
289} 258}
290 259
291static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) 260static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
@@ -475,9 +444,11 @@ static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi)
475 return; 444 return;
476} 445}
477 446
478static void ftdi_elan_command_work(void *data) 447static void ftdi_elan_command_work(struct work_struct *work)
479{ 448{
480 struct usb_ftdi *ftdi = data; 449 struct usb_ftdi *ftdi =
450 container_of(work, struct usb_ftdi, command_work.work);
451
481 if (ftdi->disconnected > 0) { 452 if (ftdi->disconnected > 0) {
482 ftdi_elan_put_kref(ftdi); 453 ftdi_elan_put_kref(ftdi);
483 return; 454 return;
@@ -500,9 +471,10 @@ static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi)
500 return; 471 return;
501} 472}
502 473
503static void ftdi_elan_respond_work(void *data) 474static void ftdi_elan_respond_work(struct work_struct *work)
504{ 475{
505 struct usb_ftdi *ftdi = data; 476 struct usb_ftdi *ftdi =
477 container_of(work, struct usb_ftdi, respond_work.work);
506 if (ftdi->disconnected > 0) { 478 if (ftdi->disconnected > 0) {
507 ftdi_elan_put_kref(ftdi); 479 ftdi_elan_put_kref(ftdi);
508 return; 480 return;
@@ -534,9 +506,10 @@ static void ftdi_elan_respond_work(void *data)
534* after the FTDI has been synchronized 506* after the FTDI has been synchronized
535* 507*
536*/ 508*/
537static void ftdi_elan_status_work(void *data) 509static void ftdi_elan_status_work(struct work_struct *work)
538{ 510{
539 struct usb_ftdi *ftdi = data; 511 struct usb_ftdi *ftdi =
512 container_of(work, struct usb_ftdi, status_work.work);
540 int work_delay_in_msec = 0; 513 int work_delay_in_msec = 0;
541 if (ftdi->disconnected > 0) { 514 if (ftdi->disconnected > 0) {
542 ftdi_elan_put_kref(ftdi); 515 ftdi_elan_put_kref(ftdi);
@@ -2677,12 +2650,9 @@ static int ftdi_elan_probe(struct usb_interface *interface,
2677 ftdi->class = NULL; 2650 ftdi->class = NULL;
2678 dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" 2651 dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a"
2679 "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber); 2652 "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber);
2680 INIT_WORK(&ftdi->status_work, ftdi_elan_status_work, 2653 INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
2681 (void *)ftdi); 2654 INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
2682 INIT_WORK(&ftdi->command_work, ftdi_elan_command_work, 2655 INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
2683 (void *)ftdi);
2684 INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
2685 (void *)ftdi);
2686 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); 2656 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
2687 return 0; 2657 return 0;
2688 } else { 2658 } else {
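The ftdi-elan helpers shrink because queue_delayed_work() with a delay of 0 behaves like an immediate queue_work(), so the old "delta > 0" special case disappears; the return value (non-zero only when the item was not already pending) still decides whether a kref is taken or dropped. A sketch of the resulting pairing, where my_wq, struct my_ftdi and my_ftdi_delete are placeholder names:

        /* requeue from inside the handler: a reference is already held,
         * so drop it if the work could not be queued again */
        static void my_requeue(struct my_ftdi *ftdi, unsigned int delta)
        {
                if (!queue_delayed_work(my_wq, &ftdi->status_work, delta))
                        kref_put(&ftdi->kref, my_ftdi_delete);
        }

        /* queue from elsewhere: take a reference only if it was really queued */
        static void my_queue(struct my_ftdi *ftdi, unsigned int delta)
        {
                if (queue_delayed_work(my_wq, &ftdi->status_work, delta))
                        kref_get(&ftdi->kref);
        }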
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c
index 9110793f81d3..9659c79e187e 100644
--- a/drivers/usb/misc/phidgetkit.c
+++ b/drivers/usb/misc/phidgetkit.c
@@ -81,8 +81,8 @@ struct interfacekit {
81 unsigned char *data; 81 unsigned char *data;
82 dma_addr_t data_dma; 82 dma_addr_t data_dma;
83 83
84 struct work_struct do_notify; 84 struct delayed_work do_notify;
85 struct work_struct do_resubmit; 85 struct delayed_work do_resubmit;
86 unsigned long input_events; 86 unsigned long input_events;
87 unsigned long sensor_events; 87 unsigned long sensor_events;
88}; 88};
@@ -374,7 +374,7 @@ static void interfacekit_irq(struct urb *urb)
374 } 374 }
375 375
376 if (kit->input_events || kit->sensor_events) 376 if (kit->input_events || kit->sensor_events)
377 schedule_work(&kit->do_notify); 377 schedule_delayed_work(&kit->do_notify, 0);
378 378
379resubmit: 379resubmit:
380 status = usb_submit_urb(urb, SLAB_ATOMIC); 380 status = usb_submit_urb(urb, SLAB_ATOMIC);
@@ -384,9 +384,10 @@ resubmit:
384 kit->udev->devpath, status); 384 kit->udev->devpath, status);
385} 385}
386 386
387static void do_notify(void *data) 387static void do_notify(struct work_struct *work)
388{ 388{
389 struct interfacekit *kit = data; 389 struct interfacekit *kit =
390 container_of(work, struct interfacekit, do_notify.work);
390 int i; 391 int i;
391 char sysfs_file[8]; 392 char sysfs_file[8];
392 393
@@ -405,9 +406,11 @@ static void do_notify(void *data)
405 } 406 }
406} 407}
407 408
408static void do_resubmit(void *data) 409static void do_resubmit(struct work_struct *work)
409{ 410{
410 set_outputs(data); 411 struct interfacekit *kit =
412 container_of(work, struct interfacekit, do_resubmit.work);
413 set_outputs(kit);
411} 414}
412 415
413#define show_set_output(value) \ 416#define show_set_output(value) \
@@ -575,8 +578,8 @@ static int interfacekit_probe(struct usb_interface *intf, const struct usb_devic
575 578
576 kit->udev = usb_get_dev(dev); 579 kit->udev = usb_get_dev(dev);
577 kit->intf = intf; 580 kit->intf = intf;
578 INIT_WORK(&kit->do_notify, do_notify, kit); 581 INIT_DELAYED_WORK(&kit->do_notify, do_notify);
579 INIT_WORK(&kit->do_resubmit, do_resubmit, kit); 582 INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
580 usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data, 583 usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
581 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, 584 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
582 interfacekit_irq, kit, endpoint->bInterval); 585 interfacekit_irq, kit, endpoint->bInterval);
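phidgetkit carries two independent work items in one private structure; each handler passes its own member to container_of(), and queueing from the URB completion path simply becomes schedule_delayed_work(..., 0). A sketch under those assumptions (struct and function names are illustrative):

        struct my_kit {
                struct delayed_work do_notify;
                struct delayed_work do_resubmit;
        };

        static void my_notify(struct work_struct *work)
        {
                struct my_kit *kit = container_of(work, struct my_kit, do_notify.work);
                /* ... push sysfs notifications for kit ... */
        }

        static void my_resubmit(struct work_struct *work)
        {
                struct my_kit *kit = container_of(work, struct my_kit, do_resubmit.work);
                /* ... rewrite the outputs for kit ... */
        }

        static void my_irq_event(struct my_kit *kit)
        {
                schedule_delayed_work(&kit->do_notify, 0);      /* replaces schedule_work() */
        }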
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c
index c3469b0a67c2..2bb4fa572bb7 100644
--- a/drivers/usb/misc/phidgetmotorcontrol.c
+++ b/drivers/usb/misc/phidgetmotorcontrol.c
@@ -41,7 +41,7 @@ struct motorcontrol {
41 unsigned char *data; 41 unsigned char *data;
42 dma_addr_t data_dma; 42 dma_addr_t data_dma;
43 43
44 struct work_struct do_notify; 44 struct delayed_work do_notify;
45 unsigned long input_events; 45 unsigned long input_events;
46 unsigned long speed_events; 46 unsigned long speed_events;
47 unsigned long exceed_events; 47 unsigned long exceed_events;
@@ -148,7 +148,7 @@ static void motorcontrol_irq(struct urb *urb)
148 set_bit(1, &mc->exceed_events); 148 set_bit(1, &mc->exceed_events);
149 149
150 if (mc->input_events || mc->exceed_events || mc->speed_events) 150 if (mc->input_events || mc->exceed_events || mc->speed_events)
151 schedule_work(&mc->do_notify); 151 schedule_delayed_work(&mc->do_notify, 0);
152 152
153resubmit: 153resubmit:
154 status = usb_submit_urb(urb, SLAB_ATOMIC); 154 status = usb_submit_urb(urb, SLAB_ATOMIC);
@@ -159,9 +159,10 @@ resubmit:
159 mc->udev->devpath, status); 159 mc->udev->devpath, status);
160} 160}
161 161
162static void do_notify(void *data) 162static void do_notify(struct work_struct *work)
163{ 163{
164 struct motorcontrol *mc = data; 164 struct motorcontrol *mc =
165 container_of(work, struct motorcontrol, do_notify.work);
165 int i; 166 int i;
166 char sysfs_file[8]; 167 char sysfs_file[8];
167 168
@@ -348,7 +349,7 @@ static int motorcontrol_probe(struct usb_interface *intf, const struct usb_devic
348 mc->udev = usb_get_dev(dev); 349 mc->udev = usb_get_dev(dev);
349 mc->intf = intf; 350 mc->intf = intf;
350 mc->acceleration[0] = mc->acceleration[1] = 10; 351 mc->acceleration[0] = mc->acceleration[1] = 10;
351 INIT_WORK(&mc->do_notify, do_notify, mc); 352 INIT_DELAYED_WORK(&mc->do_notify, do_notify);
352 usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data, 353 usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
353 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, 354 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
354 motorcontrol_irq, mc, endpoint->bInterval); 355 motorcontrol_irq, mc, endpoint->bInterval);
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index 7c906a43e497..fa78326d0bf0 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -222,7 +222,7 @@ struct kaweth_device
222 int suspend_lowmem_ctrl; 222 int suspend_lowmem_ctrl;
223 int linkstate; 223 int linkstate;
224 int opened; 224 int opened;
225 struct work_struct lowmem_work; 225 struct delayed_work lowmem_work;
226 226
227 struct usb_device *dev; 227 struct usb_device *dev;
228 struct net_device *net; 228 struct net_device *net;
@@ -530,9 +530,10 @@ resubmit:
530 kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); 530 kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
531} 531}
532 532
533static void kaweth_resubmit_tl(void *d) 533static void kaweth_resubmit_tl(struct work_struct *work)
534{ 534{
535 struct kaweth_device *kaweth = (struct kaweth_device *)d; 535 struct kaweth_device *kaweth =
536 container_of(work, struct kaweth_device, lowmem_work.work);
536 537
537 if (IS_BLOCKED(kaweth->status)) 538 if (IS_BLOCKED(kaweth->status))
538 return; 539 return;
@@ -1126,7 +1127,7 @@ err_fw:
1126 1127
1127 /* kaweth is zeroed as part of alloc_netdev */ 1128 /* kaweth is zeroed as part of alloc_netdev */
1128 1129
1129 INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth); 1130 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
1130 1131
1131 SET_MODULE_OWNER(netdev); 1132 SET_MODULE_OWNER(netdev);
1132 1133
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 69eb0db399df..b5690b3834e3 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -1281,9 +1281,9 @@ static inline void setup_pegasus_II(pegasus_t * pegasus)
1281static struct workqueue_struct *pegasus_workqueue = NULL; 1281static struct workqueue_struct *pegasus_workqueue = NULL;
1282#define CARRIER_CHECK_DELAY (2 * HZ) 1282#define CARRIER_CHECK_DELAY (2 * HZ)
1283 1283
1284static void check_carrier(void *data) 1284static void check_carrier(struct work_struct *work)
1285{ 1285{
1286 pegasus_t *pegasus = data; 1286 pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
1287 set_carrier(pegasus->net); 1287 set_carrier(pegasus->net);
1288 if (!(pegasus->flags & PEGASUS_UNPLUG)) { 1288 if (!(pegasus->flags & PEGASUS_UNPLUG)) {
1289 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, 1289 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
@@ -1319,7 +1319,7 @@ static int pegasus_probe(struct usb_interface *intf,
1319 1319
1320 tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); 1320 tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
1321 1321
1322 INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus); 1322 INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
1323 1323
1324 pegasus->intf = intf; 1324 pegasus->intf = intf;
1325 pegasus->usb = dev; 1325 pegasus->usb = dev;
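pegasus turns its carrier check into the usual self-rearming pattern: the handler recovers the device with container_of() and requeues itself on the driver's workqueue unless the device is going away. Roughly, with placeholder names (my_wq, MY_UNPLUGGED, CHECK_DELAY stand in for the driver's own symbols):

        static void my_check_carrier(struct work_struct *work)
        {
                struct my_nic *nic = container_of(work, struct my_nic, carrier_check.work);

                my_update_carrier(nic->net);
                if (!(nic->flags & MY_UNPLUGGED))
                        queue_delayed_work(my_wq, &nic->carrier_check, CHECK_DELAY);
        }

        /* probe path: INIT_DELAYED_WORK(&nic->carrier_check, my_check_carrier); */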
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index 006438069b66..98f6898cae1f 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -95,7 +95,7 @@ typedef struct pegasus {
95 int dev_index; 95 int dev_index;
96 int intr_interval; 96 int intr_interval;
97 struct tasklet_struct rx_tl; 97 struct tasklet_struct rx_tl;
98 struct work_struct carrier_check; 98 struct delayed_work carrier_check;
99 struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; 99 struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb;
100 struct sk_buff *rx_pool[RX_SKBS]; 100 struct sk_buff *rx_pool[RX_SKBS];
101 struct sk_buff *rx_skb; 101 struct sk_buff *rx_skb;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 7672e11c94c4..327f97555679 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -782,9 +782,10 @@ static struct ethtool_ops usbnet_ethtool_ops = {
782 * especially now that control transfers can be queued. 782 * especially now that control transfers can be queued.
783 */ 783 */
784static void 784static void
785kevent (void *data) 785kevent (struct work_struct *work)
786{ 786{
787 struct usbnet *dev = data; 787 struct usbnet *dev =
788 container_of(work, struct usbnet, kevent);
788 int status; 789 int status;
789 790
790 /* usb_clear_halt() needs a thread context */ 791 /* usb_clear_halt() needs a thread context */
@@ -1146,7 +1147,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1146 skb_queue_head_init (&dev->done); 1147 skb_queue_head_init (&dev->done);
1147 dev->bh.func = usbnet_bh; 1148 dev->bh.func = usbnet_bh;
1148 dev->bh.data = (unsigned long) dev; 1149 dev->bh.data = (unsigned long) dev;
1149 INIT_WORK (&dev->kevent, kevent, dev); 1150 INIT_WORK (&dev->kevent, kevent);
1150 dev->delay.function = usbnet_bh; 1151 dev->delay.function = usbnet_bh;
1151 dev->delay.data = (unsigned long) dev; 1152 dev->delay.data = (unsigned long) dev;
1152 init_timer (&dev->delay); 1153 init_timer (&dev->delay);
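usbnet's kevent handler is the contrast case: the work item stays a plain work_struct because it is never queued with a delay, so container_of() names the member directly, without a ".work" suffix. A short sketch with illustrative names:

        static void my_kevent(struct work_struct *work)
        {
                /* plain work_struct: no ".work" indirection needed */
                struct my_usbnet *dev = container_of(work, struct my_usbnet, kevent);
                /* ... clear halts, resubmit URBs for dev ... */
        }

        /* probe path: INIT_WORK(&dev->kevent, my_kevent); */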
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b1b5707bc99a..86bcf63b6ba5 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -92,6 +92,7 @@ struct aircable_private {
92 struct circ_buf *rx_buf; /* read buffer */ 92 struct circ_buf *rx_buf; /* read buffer */
93 int rx_flags; /* for throttilng */ 93 int rx_flags; /* for throttilng */
94 struct work_struct rx_work; /* work cue for the receiving line */ 94 struct work_struct rx_work; /* work cue for the receiving line */
95 struct usb_serial_port *port; /* USB port with which associated */
95}; 96};
96 97
97/* Private methods */ 98/* Private methods */
@@ -251,10 +252,11 @@ static void aircable_send(struct usb_serial_port *port)
251 schedule_work(&port->work); 252 schedule_work(&port->work);
252} 253}
253 254
254static void aircable_read(void *params) 255static void aircable_read(struct work_struct *work)
255{ 256{
256 struct usb_serial_port *port = params; 257 struct aircable_private *priv =
257 struct aircable_private *priv = usb_get_serial_port_data(port); 258 container_of(work, struct aircable_private, rx_work);
259 struct usb_serial_port *port = priv->port;
258 struct tty_struct *tty; 260 struct tty_struct *tty;
259 unsigned char *data; 261 unsigned char *data;
260 int count; 262 int count;
@@ -349,7 +351,8 @@ static int aircable_attach (struct usb_serial *serial)
349 } 351 }
350 352
351 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); 353 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
352 INIT_WORK(&priv->rx_work, aircable_read, port); 354 priv->port = port;
355 INIT_WORK(&priv->rx_work, aircable_read);
353 356
354 usb_set_serial_port_data(serial->port[0], priv); 357 usb_set_serial_port_data(serial->port[0], priv);
355 358
@@ -516,7 +519,7 @@ static void aircable_read_bulk_callback(struct urb *urb)
516 package_length - shift); 519 package_length - shift);
517 } 520 }
518 } 521 }
519 aircable_read(port); 522 aircable_read(&priv->rx_work);
520 } 523 }
521 524
522 /* Schedule the next read _if_ we are still open */ 525 /* Schedule the next read _if_ we are still open */
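aircable demonstrates the second recurring change: when the handler needs an object that container_of() cannot reach (here the usb_serial_port), a back-pointer is added to the private structure and filled in at attach time, and callers that used to invoke the handler directly with the old data pointer now pass the work item instead. A sketch under those assumptions, names illustrative:

        struct my_priv {
                struct work_struct rx_work;
                struct usb_serial_port *port;   /* back-pointer added for the handler */
        };

        static void my_read(struct work_struct *work)
        {
                struct my_priv *priv = container_of(work, struct my_priv, rx_work);
                struct usb_serial_port *port = priv->port;
                /* ... drain the receive buffer into port->tty ... */
        }

        /* attach: priv->port = port; INIT_WORK(&priv->rx_work, my_read);        */
        /* direct call from the completion path: my_read(&priv->rx_work);        */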
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 5e3ac281a2f8..83d0e21145b0 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -430,13 +430,14 @@ struct digi_port {
430 int dp_in_close; /* close in progress */ 430 int dp_in_close; /* close in progress */
431 wait_queue_head_t dp_close_wait; /* wait queue for close */ 431 wait_queue_head_t dp_close_wait; /* wait queue for close */
432 struct work_struct dp_wakeup_work; 432 struct work_struct dp_wakeup_work;
433 struct usb_serial_port *dp_port;
433}; 434};
434 435
435 436
436/* Local Function Declarations */ 437/* Local Function Declarations */
437 438
438static void digi_wakeup_write( struct usb_serial_port *port ); 439static void digi_wakeup_write( struct usb_serial_port *port );
439static void digi_wakeup_write_lock(void *); 440static void digi_wakeup_write_lock(struct work_struct *work);
440static int digi_write_oob_command( struct usb_serial_port *port, 441static int digi_write_oob_command( struct usb_serial_port *port,
441 unsigned char *buf, int count, int interruptible ); 442 unsigned char *buf, int count, int interruptible );
442static int digi_write_inb_command( struct usb_serial_port *port, 443static int digi_write_inb_command( struct usb_serial_port *port,
@@ -598,11 +599,12 @@ static inline long cond_wait_interruptible_timeout_irqrestore(
598* on writes. 599* on writes.
599*/ 600*/
600 601
601static void digi_wakeup_write_lock(void *arg) 602static void digi_wakeup_write_lock(struct work_struct *work)
602{ 603{
603 struct usb_serial_port *port = arg; 604 struct digi_port *priv =
605 container_of(work, struct digi_port, dp_wakeup_work);
606 struct usb_serial_port *port = priv->dp_port;
604 unsigned long flags; 607 unsigned long flags;
605 struct digi_port *priv = usb_get_serial_port_data(port);
606 608
607 609
608 spin_lock_irqsave( &priv->dp_port_lock, flags ); 610 spin_lock_irqsave( &priv->dp_port_lock, flags );
@@ -1702,8 +1704,8 @@ dbg( "digi_startup: TOP" );
1702 init_waitqueue_head( &priv->dp_flush_wait ); 1704 init_waitqueue_head( &priv->dp_flush_wait );
1703 priv->dp_in_close = 0; 1705 priv->dp_in_close = 0;
1704 init_waitqueue_head( &priv->dp_close_wait ); 1706 init_waitqueue_head( &priv->dp_close_wait );
1705 INIT_WORK(&priv->dp_wakeup_work, 1707 INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
1706 digi_wakeup_write_lock, serial->port[i]); 1708 priv->dp_port = serial->port[i];
1707 1709
1708 /* initialize write wait queue for this port */ 1710 /* initialize write wait queue for this port */
1709 init_waitqueue_head( &serial->port[i]->write_wait ); 1711 init_waitqueue_head( &serial->port[i]->write_wait );
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89ce2775be15..72e4d48f51e9 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -559,7 +559,8 @@ struct ftdi_private {
559 char prev_status, diff_status; /* Used for TIOCMIWAIT */ 559 char prev_status, diff_status; /* Used for TIOCMIWAIT */
560 __u8 rx_flags; /* receive state flags (throttling) */ 560 __u8 rx_flags; /* receive state flags (throttling) */
561 spinlock_t rx_lock; /* spinlock for receive state */ 561 spinlock_t rx_lock; /* spinlock for receive state */
562 struct work_struct rx_work; 562 struct delayed_work rx_work;
563 struct usb_serial_port *port;
563 int rx_processed; 564 int rx_processed;
564 unsigned long rx_bytes; 565 unsigned long rx_bytes;
565 566
@@ -593,7 +594,7 @@ static int ftdi_write_room (struct usb_serial_port *port);
593static int ftdi_chars_in_buffer (struct usb_serial_port *port); 594static int ftdi_chars_in_buffer (struct usb_serial_port *port);
594static void ftdi_write_bulk_callback (struct urb *urb); 595static void ftdi_write_bulk_callback (struct urb *urb);
595static void ftdi_read_bulk_callback (struct urb *urb); 596static void ftdi_read_bulk_callback (struct urb *urb);
596static void ftdi_process_read (void *param); 597static void ftdi_process_read (struct work_struct *work);
597static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); 598static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old);
598static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); 599static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file);
599static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); 600static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
@@ -1201,7 +1202,8 @@ static int ftdi_sio_attach (struct usb_serial *serial)
1201 port->read_urb->transfer_buffer_length = BUFSZ; 1202 port->read_urb->transfer_buffer_length = BUFSZ;
1202 } 1203 }
1203 1204
1204 INIT_WORK(&priv->rx_work, ftdi_process_read, port); 1205 INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
1206 priv->port = port;
1205 1207
1206 /* Free port's existing write urb and transfer buffer. */ 1208 /* Free port's existing write urb and transfer buffer. */
1207 if (port->write_urb) { 1209 if (port->write_urb) {
@@ -1640,17 +1642,18 @@ static void ftdi_read_bulk_callback (struct urb *urb)
1640 priv->rx_bytes += countread; 1642 priv->rx_bytes += countread;
1641 spin_unlock_irqrestore(&priv->rx_lock, flags); 1643 spin_unlock_irqrestore(&priv->rx_lock, flags);
1642 1644
1643 ftdi_process_read(port); 1645 ftdi_process_read(&priv->rx_work.work);
1644 1646
1645} /* ftdi_read_bulk_callback */ 1647} /* ftdi_read_bulk_callback */
1646 1648
1647 1649
1648static void ftdi_process_read (void *param) 1650static void ftdi_process_read (struct work_struct *work)
1649{ /* ftdi_process_read */ 1651{ /* ftdi_process_read */
1650 struct usb_serial_port *port = (struct usb_serial_port*)param; 1652 struct ftdi_private *priv =
1653 container_of(work, struct ftdi_private, rx_work.work);
1654 struct usb_serial_port *port = priv->port;
1651 struct urb *urb; 1655 struct urb *urb;
1652 struct tty_struct *tty; 1656 struct tty_struct *tty;
1653 struct ftdi_private *priv;
1654 char error_flag; 1657 char error_flag;
1655 unsigned char *data; 1658 unsigned char *data;
1656 1659
@@ -2179,7 +2182,7 @@ static void ftdi_unthrottle (struct usb_serial_port *port)
2179 spin_unlock_irqrestore(&priv->rx_lock, flags); 2182 spin_unlock_irqrestore(&priv->rx_lock, flags);
2180 2183
2181 if (actually_throttled) 2184 if (actually_throttled)
2182 schedule_work(&priv->rx_work); 2185 schedule_delayed_work(&priv->rx_work, 0);
2183} 2186}
2184 2187
2185static int __init ftdi_init (void) 2188static int __init ftdi_init (void)
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 909005107ea2..e09a0bfe6231 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -120,6 +120,8 @@ struct keyspan_pda_private {
120 int tx_throttled; 120 int tx_throttled;
121 struct work_struct wakeup_work; 121 struct work_struct wakeup_work;
122 struct work_struct unthrottle_work; 122 struct work_struct unthrottle_work;
123 struct usb_serial *serial;
124 struct usb_serial_port *port;
123}; 125};
124 126
125 127
@@ -175,9 +177,11 @@ static struct usb_device_id id_table_fake_xircom [] = {
175}; 177};
176#endif 178#endif
177 179
178static void keyspan_pda_wakeup_write( struct usb_serial_port *port ) 180static void keyspan_pda_wakeup_write(struct work_struct *work)
179{ 181{
180 182 struct keyspan_pda_private *priv =
183 container_of(work, struct keyspan_pda_private, wakeup_work);
184 struct usb_serial_port *port = priv->port;
181 struct tty_struct *tty = port->tty; 185 struct tty_struct *tty = port->tty;
182 186
183 /* wake up port processes */ 187 /* wake up port processes */
@@ -187,8 +191,11 @@ static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
187 tty_wakeup(tty); 191 tty_wakeup(tty);
188} 192}
189 193
190static void keyspan_pda_request_unthrottle( struct usb_serial *serial ) 194static void keyspan_pda_request_unthrottle(struct work_struct *work)
191{ 195{
196 struct keyspan_pda_private *priv =
197 container_of(work, struct keyspan_pda_private, unthrottle_work);
198 struct usb_serial *serial = priv->serial;
192 int result; 199 int result;
193 200
194 dbg(" request_unthrottle"); 201 dbg(" request_unthrottle");
@@ -765,11 +772,10 @@ static int keyspan_pda_startup (struct usb_serial *serial)
765 return (1); /* error */ 772 return (1); /* error */
766 usb_set_serial_port_data(serial->port[0], priv); 773 usb_set_serial_port_data(serial->port[0], priv);
767 init_waitqueue_head(&serial->port[0]->write_wait); 774 init_waitqueue_head(&serial->port[0]->write_wait);
768 INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write, 775 INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
769 (void *)(serial->port[0])); 776 INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
770 INIT_WORK(&priv->unthrottle_work, 777 priv->serial = serial;
771 (void *)keyspan_pda_request_unthrottle, 778 priv->port = serial->port[0];
772 (void *)(serial));
773 return (0); 779 return (0);
774} 780}
775 781
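keyspan_pda used to cast its handlers through (void *) so that functions taking a usb_serial_port or usb_serial could be registered directly; the new prototype makes such casts a type error, so both handlers are rewritten to take a work_struct and the objects they need become back-pointers in the private data. Sketch with placeholder names:

        struct my_pda {
                struct work_struct wakeup_work;
                struct work_struct unthrottle_work;
                struct usb_serial *serial;
                struct usb_serial_port *port;
        };

        static void my_wakeup(struct work_struct *work)
        {
                struct my_pda *priv = container_of(work, struct my_pda, wakeup_work);

                tty_wakeup(priv->port->tty);
        }

        static void my_unthrottle(struct work_struct *work)
        {
                struct my_pda *priv = container_of(work, struct my_pda, unthrottle_work);
                /* ... send the unthrottle request via priv->serial ... */
        }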
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index c1257d5292f5..3d5072f14b8d 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -533,9 +533,10 @@ void usb_serial_port_softint(struct usb_serial_port *port)
533 schedule_work(&port->work); 533 schedule_work(&port->work);
534} 534}
535 535
536static void usb_serial_port_work(void *private) 536static void usb_serial_port_work(struct work_struct *work)
537{ 537{
538 struct usb_serial_port *port = private; 538 struct usb_serial_port *port =
539 container_of(work, struct usb_serial_port, work);
539 struct tty_struct *tty; 540 struct tty_struct *tty;
540 541
541 dbg("%s - port %d", __FUNCTION__, port->number); 542 dbg("%s - port %d", __FUNCTION__, port->number);
@@ -799,7 +800,7 @@ int usb_serial_probe(struct usb_interface *interface,
799 port->serial = serial; 800 port->serial = serial;
800 spin_lock_init(&port->lock); 801 spin_lock_init(&port->lock);
801 mutex_init(&port->mutex); 802 mutex_init(&port->mutex);
802 INIT_WORK(&port->work, usb_serial_port_work, port); 803 INIT_WORK(&port->work, usb_serial_port_work);
803 serial->port[i] = port; 804 serial->port[i] = port;
804 } 805 }
805 806
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 4d1cd7aeccd3..154c7d290597 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -227,6 +227,7 @@ struct whiteheat_private {
227 struct list_head rx_urbs_submitted; 227 struct list_head rx_urbs_submitted;
228 struct list_head rx_urb_q; 228 struct list_head rx_urb_q;
229 struct work_struct rx_work; 229 struct work_struct rx_work;
230 struct usb_serial_port *port;
230 struct list_head tx_urbs_free; 231 struct list_head tx_urbs_free;
231 struct list_head tx_urbs_submitted; 232 struct list_head tx_urbs_submitted;
232}; 233};
@@ -241,7 +242,7 @@ static void command_port_read_callback(struct urb *urb);
241static int start_port_read(struct usb_serial_port *port); 242static int start_port_read(struct usb_serial_port *port);
242static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head); 243static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head);
243static struct list_head *list_first(struct list_head *head); 244static struct list_head *list_first(struct list_head *head);
244static void rx_data_softint(void *private); 245static void rx_data_softint(struct work_struct *work);
245 246
246static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); 247static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize);
247static int firm_open(struct usb_serial_port *port); 248static int firm_open(struct usb_serial_port *port);
@@ -424,7 +425,8 @@ static int whiteheat_attach (struct usb_serial *serial)
424 spin_lock_init(&info->lock); 425 spin_lock_init(&info->lock);
425 info->flags = 0; 426 info->flags = 0;
426 info->mcr = 0; 427 info->mcr = 0;
427 INIT_WORK(&info->rx_work, rx_data_softint, port); 428 INIT_WORK(&info->rx_work, rx_data_softint);
429 info->port = port;
428 430
429 INIT_LIST_HEAD(&info->rx_urbs_free); 431 INIT_LIST_HEAD(&info->rx_urbs_free);
430 INIT_LIST_HEAD(&info->rx_urbs_submitted); 432 INIT_LIST_HEAD(&info->rx_urbs_submitted);
@@ -949,7 +951,7 @@ static void whiteheat_unthrottle (struct usb_serial_port *port)
949 spin_unlock_irqrestore(&info->lock, flags); 951 spin_unlock_irqrestore(&info->lock, flags);
950 952
951 if (actually_throttled) 953 if (actually_throttled)
952 rx_data_softint(port); 954 rx_data_softint(&info->rx_work);
953 955
954 return; 956 return;
955} 957}
@@ -1400,10 +1402,11 @@ static struct list_head *list_first(struct list_head *head)
1400} 1402}
1401 1403
1402 1404
1403static void rx_data_softint(void *private) 1405static void rx_data_softint(struct work_struct *work)
1404{ 1406{
1405 struct usb_serial_port *port = (struct usb_serial_port *)private; 1407 struct whiteheat_private *info =
1406 struct whiteheat_private *info = usb_get_serial_port_data(port); 1408 container_of(work, struct whiteheat_private, rx_work);
1409 struct usb_serial_port *port = info->port;
1407 struct tty_struct *tty = port->tty; 1410 struct tty_struct *tty = port->tty;
1408 struct whiteheat_urb_wrap *wrap; 1411 struct whiteheat_urb_wrap *wrap;
1409 struct urb *urb; 1412 struct urb *urb;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 302174b8e477..31f476a64790 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -383,9 +383,9 @@ static void fbcon_update_softback(struct vc_data *vc)
383 softback_top = 0; 383 softback_top = 0;
384} 384}
385 385
386static void fb_flashcursor(void *private) 386static void fb_flashcursor(struct work_struct *work)
387{ 387{
388 struct fb_info *info = private; 388 struct fb_info *info = container_of(work, struct fb_info, queue);
389 struct fbcon_ops *ops = info->fbcon_par; 389 struct fbcon_ops *ops = info->fbcon_par;
390 struct display *p; 390 struct display *p;
391 struct vc_data *vc = NULL; 391 struct vc_data *vc = NULL;
@@ -442,7 +442,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
442 if ((!info->queue.func || info->queue.func == fb_flashcursor) && 442 if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
443 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { 443 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
444 if (!info->queue.func) 444 if (!info->queue.func)
445 INIT_WORK(&info->queue, fb_flashcursor, info); 445 INIT_WORK(&info->queue, fb_flashcursor);
446 446
447 init_timer(&ops->cursor_timer); 447 init_timer(&ops->cursor_timer);
448 ops->cursor_timer.function = cursor_timer_handler; 448 ops->cursor_timer.function = cursor_timer_handler;
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 8a8ae55a7403..38eb0b69c2d7 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -964,9 +964,10 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
964 * Our LCD controller task (which is called when we blank or unblank) 964 * Our LCD controller task (which is called when we blank or unblank)
965 * via keventd. 965 * via keventd.
966 */ 966 */
967static void pxafb_task(void *dummy) 967static void pxafb_task(struct work_struct *work)
968{ 968{
969 struct pxafb_info *fbi = dummy; 969 struct pxafb_info *fbi =
970 container_of(work, struct pxafb_info, task);
970 u_int state = xchg(&fbi->task_state, -1); 971 u_int state = xchg(&fbi->task_state, -1);
971 972
972 set_ctrlr_state(fbi, state); 973 set_ctrlr_state(fbi, state);
@@ -1159,7 +1160,7 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev)
1159 } 1160 }
1160 1161
1161 init_waitqueue_head(&fbi->ctrlr_wait); 1162 init_waitqueue_head(&fbi->ctrlr_wait);
1162 INIT_WORK(&fbi->task, pxafb_task, fbi); 1163 INIT_WORK(&fbi->task, pxafb_task);
1163 init_MUTEX(&fbi->ctrlr_sem); 1164 init_MUTEX(&fbi->ctrlr_sem);
1164 1165
1165 return fbi; 1166 return fbi;
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 90a79c784549..944273c3dbff 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -110,8 +110,8 @@ struct v9fs_mux_rpc {
110}; 110};
111 111
112static int v9fs_poll_proc(void *); 112static int v9fs_poll_proc(void *);
113static void v9fs_read_work(void *); 113static void v9fs_read_work(struct work_struct *work);
114static void v9fs_write_work(void *); 114static void v9fs_write_work(struct work_struct *work);
115static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, 115static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
116 poll_table * p); 116 poll_table * p);
117static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); 117static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
297 m->rbuf = NULL; 297 m->rbuf = NULL;
298 m->wpos = m->wsize = 0; 298 m->wpos = m->wsize = 0;
299 m->wbuf = NULL; 299 m->wbuf = NULL;
300 INIT_WORK(&m->rq, v9fs_read_work, m); 300 INIT_WORK(&m->rq, v9fs_read_work);
301 INIT_WORK(&m->wq, v9fs_write_work, m); 301 INIT_WORK(&m->wq, v9fs_write_work);
302 m->wsched = 0; 302 m->wsched = 0;
303 memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); 303 memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
304 m->poll_task = NULL; 304 m->poll_task = NULL;
@@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a)
458/** 458/**
459 * v9fs_write_work - called when a transport can send some data 459 * v9fs_write_work - called when a transport can send some data
460 */ 460 */
461static void v9fs_write_work(void *a) 461static void v9fs_write_work(struct work_struct *work)
462{ 462{
463 int n, err; 463 int n, err;
464 struct v9fs_mux_data *m; 464 struct v9fs_mux_data *m;
465 struct v9fs_req *req; 465 struct v9fs_req *req;
466 466
467 m = a; 467 m = container_of(work, struct v9fs_mux_data, wq);
468 468
469 if (m->err < 0) { 469 if (m->err < 0) {
470 clear_bit(Wworksched, &m->wsched); 470 clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
564/** 564/**
565 * v9fs_read_work - called when there is some data to be read from a transport 565 * v9fs_read_work - called when there is some data to be read from a transport
566 */ 566 */
567static void v9fs_read_work(void *a) 567static void v9fs_read_work(struct work_struct *work)
568{ 568{
569 int n, err; 569 int n, err;
570 struct v9fs_mux_data *m; 570 struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@ static void v9fs_read_work(void *a)
572 struct v9fs_fcall *rcall; 572 struct v9fs_fcall *rcall;
573 char *rbuf; 573 char *rbuf;
574 574
575 m = a; 575 m = container_of(work, struct v9fs_mux_data, rq);
576 576
577 if (m->err < 0) 577 if (m->err < 0)
578 return; 578 return;
diff --git a/fs/aio.c b/fs/aio.c
index 277a5f2d18ad..287a1bc7a182 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,13 +53,13 @@ static kmem_cache_t *kioctx_cachep;
53static struct workqueue_struct *aio_wq; 53static struct workqueue_struct *aio_wq;
54 54
55/* Used for rare fput completion. */ 55/* Used for rare fput completion. */
56static void aio_fput_routine(void *); 56static void aio_fput_routine(struct work_struct *);
57static DECLARE_WORK(fput_work, aio_fput_routine, NULL); 57static DECLARE_WORK(fput_work, aio_fput_routine);
58 58
59static DEFINE_SPINLOCK(fput_lock); 59static DEFINE_SPINLOCK(fput_lock);
60static LIST_HEAD(fput_head); 60static LIST_HEAD(fput_head);
61 61
62static void aio_kick_handler(void *); 62static void aio_kick_handler(struct work_struct *);
63static void aio_queue_work(struct kioctx *); 63static void aio_queue_work(struct kioctx *);
64 64
65/* aio_setup 65/* aio_setup
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
227 227
228 INIT_LIST_HEAD(&ctx->active_reqs); 228 INIT_LIST_HEAD(&ctx->active_reqs);
229 INIT_LIST_HEAD(&ctx->run_list); 229 INIT_LIST_HEAD(&ctx->run_list);
230 INIT_WORK(&ctx->wq, aio_kick_handler, ctx); 230 INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
231 231
232 if (aio_setup_ring(ctx) < 0) 232 if (aio_setup_ring(ctx) < 0)
233 goto out_freectx; 233 goto out_freectx;
@@ -469,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
469 wake_up(&ctx->wait); 469 wake_up(&ctx->wait);
470} 470}
471 471
472static void aio_fput_routine(void *data) 472static void aio_fput_routine(struct work_struct *data)
473{ 473{
474 spin_lock_irq(&fput_lock); 474 spin_lock_irq(&fput_lock);
475 while (likely(!list_empty(&fput_head))) { 475 while (likely(!list_empty(&fput_head))) {
@@ -857,9 +857,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx)
857 * space. 857 * space.
858 * Run on aiod's context. 858 * Run on aiod's context.
859 */ 859 */
860static void aio_kick_handler(void *data) 860static void aio_kick_handler(struct work_struct *work)
861{ 861{
862 struct kioctx *ctx = data; 862 struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
863 mm_segment_t oldfs = get_fs(); 863 mm_segment_t oldfs = get_fs();
864 int requeue; 864 int requeue;
865 865
@@ -874,7 +874,7 @@ static void aio_kick_handler(void *data)
874 * we're in a worker thread already, don't use queue_delayed_work, 874 * we're in a worker thread already, don't use queue_delayed_work,
875 */ 875 */
876 if (requeue) 876 if (requeue)
877 queue_work(aio_wq, &ctx->wq); 877 queue_delayed_work(aio_wq, &ctx->wq, 0);
878} 878}
879 879
880 880
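fs/aio.c covers the statically declared case: DECLARE_WORK() also loses its data argument, and the per-context delayed work is requeued from inside the worker with an explicit delay of 0 instead of queue_work(). Roughly, with placeholder struct and helper names:

        static void my_fput_routine(struct work_struct *work);
        static DECLARE_WORK(my_fput_work, my_fput_routine);    /* no data argument */

        static void my_kick_handler(struct work_struct *work)
        {
                struct my_ioctx *ctx = container_of(work, struct my_ioctx, wq.work);
                int requeue = my_run_iocbs(ctx);        /* placeholder helper */

                if (requeue)
                        queue_delayed_work(my_aio_wq, &ctx->wq, 0);
        }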
diff --git a/fs/bio.c b/fs/bio.c
index aa4d09bd4e71..50c40ce2cead 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -940,16 +940,16 @@ static void bio_release_pages(struct bio *bio)
940 * run one bio_put() against the BIO. 940 * run one bio_put() against the BIO.
941 */ 941 */
942 942
943static void bio_dirty_fn(void *data); 943static void bio_dirty_fn(struct work_struct *work);
944 944
945static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); 945static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
946static DEFINE_SPINLOCK(bio_dirty_lock); 946static DEFINE_SPINLOCK(bio_dirty_lock);
947static struct bio *bio_dirty_list; 947static struct bio *bio_dirty_list;
948 948
949/* 949/*
950 * This runs in process context 950 * This runs in process context
951 */ 951 */
952static void bio_dirty_fn(void *data) 952static void bio_dirty_fn(struct work_struct *work)
953{ 953{
954 unsigned long flags; 954 unsigned long flags;
955 struct bio *bio; 955 struct bio *bio;
diff --git a/fs/file.c b/fs/file.c
index 8e81775c5dc8..3787e82f54c1 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -91,8 +91,10 @@ out:
91 spin_unlock(&fddef->lock); 91 spin_unlock(&fddef->lock);
92} 92}
93 93
94static void free_fdtable_work(struct fdtable_defer *f) 94static void free_fdtable_work(struct work_struct *work)
95{ 95{
96 struct fdtable_defer *f =
97 container_of(work, struct fdtable_defer, wq);
96 struct fdtable *fdt; 98 struct fdtable *fdt;
97 99
98 spin_lock_bh(&f->lock); 100 spin_lock_bh(&f->lock);
@@ -351,7 +353,7 @@ static void __devinit fdtable_defer_list_init(int cpu)
351{ 353{
352 struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); 354 struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
353 spin_lock_init(&fddef->lock); 355 spin_lock_init(&fddef->lock);
354 INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef); 356 INIT_WORK(&fddef->wq, free_fdtable_work);
355 init_timer(&fddef->timer); 357 init_timer(&fddef->timer);
356 fddef->timer.data = (unsigned long)fddef; 358 fddef->timer.data = (unsigned long)fddef;
357 fddef->timer.function = fdtable_timer; 359 fddef->timer.function = fdtable_timer;
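fs/file.c shows why the old (void (*)(void *)) cast disappears: INIT_WORK() now type-checks the handler against void (*)(struct work_struct *), so the handler itself is rewritten and no cast is needed. A minimal sketch, names illustrative:

        static void my_free_fdtable_work(struct work_struct *work)
        {
                struct my_fdtable_defer *f =
                        container_of(work, struct my_fdtable_defer, wq);
                /* ... free the deferred fdtables under f->lock ... */
        }

        /* per-cpu init: INIT_WORK(&fddef->wq, my_free_fdtable_work); */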
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 78fe0fae23ff..55f5333dae99 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,7 +35,7 @@
35 35
36struct greedy { 36struct greedy {
37 struct gfs2_holder gr_gh; 37 struct gfs2_holder gr_gh;
38 struct work_struct gr_work; 38 struct delayed_work gr_work;
39}; 39};
40 40
41struct gfs2_gl_hash_bucket { 41struct gfs2_gl_hash_bucket {
@@ -1368,9 +1368,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
1368 glops->go_xmote_th(gl, state, flags); 1368 glops->go_xmote_th(gl, state, flags);
1369} 1369}
1370 1370
1371static void greedy_work(void *data) 1371static void greedy_work(struct work_struct *work)
1372{ 1372{
1373 struct greedy *gr = data; 1373 struct greedy *gr = container_of(work, struct greedy, gr_work.work);
1374 struct gfs2_holder *gh = &gr->gr_gh; 1374 struct gfs2_holder *gh = &gr->gr_gh;
1375 struct gfs2_glock *gl = gh->gh_gl; 1375 struct gfs2_glock *gl = gh->gh_gl;
1376 const struct gfs2_glock_operations *glops = gl->gl_ops; 1376 const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1422,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1422 1422
1423 gfs2_holder_init(gl, 0, 0, gh); 1423 gfs2_holder_init(gl, 0, 0, gh);
1424 set_bit(HIF_GREEDY, &gh->gh_iflags); 1424 set_bit(HIF_GREEDY, &gh->gh_iflags);
1425 INIT_WORK(&gr->gr_work, greedy_work, gr); 1425 INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
1426 1426
1427 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); 1427 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1428 schedule_delayed_work(&gr->gr_work, time); 1428 schedule_delayed_work(&gr->gr_work, time);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 42e3bef270c9..72dad552aa00 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
577 server->rcv.ptr = (unsigned char*)&server->rcv.buf; 577 server->rcv.ptr = (unsigned char*)&server->rcv.buf;
578 server->rcv.len = 10; 578 server->rcv.len = 10;
579 server->rcv.state = 0; 579 server->rcv.state = 0;
580 INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server); 580 INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
581 INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server); 581 INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
582 sock->sk->sk_write_space = ncp_tcp_write_space; 582 sock->sk->sk_write_space = ncp_tcp_write_space;
583 } else { 583 } else {
584 INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server); 584 INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
585 INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server); 585 INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
586 server->timeout_tm.data = (unsigned long)server; 586 server->timeout_tm.data = (unsigned long)server;
587 server->timeout_tm.function = ncpdgram_timeout_call; 587 server->timeout_tm.function = ncpdgram_timeout_call;
588 } 588 }
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 11c2b252ebed..e496d8b65e92 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void *
350 } 350 }
351} 351}
352 352
353void ncpdgram_rcv_proc(void *s) 353void ncpdgram_rcv_proc(struct work_struct *work)
354{ 354{
355 struct ncp_server *server = s; 355 struct ncp_server *server =
356 container_of(work, struct ncp_server, rcv.tq);
356 struct socket* sock; 357 struct socket* sock;
357 358
358 sock = server->ncp_sock; 359 sock = server->ncp_sock;
@@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server)
468 } 469 }
469} 470}
470 471
471void ncpdgram_timeout_proc(void *s) 472void ncpdgram_timeout_proc(struct work_struct *work)
472{ 473{
473 struct ncp_server *server = s; 474 struct ncp_server *server =
475 container_of(work, struct ncp_server, timeout_tq);
474 mutex_lock(&server->rcv.creq_mutex); 476 mutex_lock(&server->rcv.creq_mutex);
475 __ncpdgram_timeout_proc(server); 477 __ncpdgram_timeout_proc(server);
476 mutex_unlock(&server->rcv.creq_mutex); 478 mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@ skipdata:;
652 } 654 }
653} 655}
654 656
655void ncp_tcp_rcv_proc(void *s) 657void ncp_tcp_rcv_proc(struct work_struct *work)
656{ 658{
657 struct ncp_server *server = s; 659 struct ncp_server *server =
660 container_of(work, struct ncp_server, rcv.tq);
658 661
659 mutex_lock(&server->rcv.creq_mutex); 662 mutex_lock(&server->rcv.creq_mutex);
660 __ncptcp_rcv_proc(server); 663 __ncptcp_rcv_proc(server);
661 mutex_unlock(&server->rcv.creq_mutex); 664 mutex_unlock(&server->rcv.creq_mutex);
662} 665}
663 666
664void ncp_tcp_tx_proc(void *s) 667void ncp_tcp_tx_proc(struct work_struct *work)
665{ 668{
666 struct ncp_server *server = s; 669 struct ncp_server *server =
670 container_of(work, struct ncp_server, tx.tq);
667 671
668 mutex_lock(&server->rcv.creq_mutex); 672 mutex_lock(&server->rcv.creq_mutex);
669 __ncptcp_try_send(server); 673 __ncptcp_try_send(server);
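The ncpfs handlers are a reminder that container_of() accepts nested members: rcv.tq and tx.tq are work_structs inside sub-structures of ncp_server, and the member path is spelled out in full. A sketch of that layout with placeholder names:

        struct my_server {
                struct {
                        struct work_struct tq;
                        /* ... per-direction state ... */
                } rcv, tx;
        };

        static void my_rcv_proc(struct work_struct *work)
        {
                struct my_server *srv = container_of(work, struct my_server, rcv.tq);
                /* ... receive path for srv ... */
        }

        static void my_tx_proc(struct work_struct *work)
        {
                struct my_server *srv = container_of(work, struct my_server, tx.tq);
                /* ... transmit path for srv ... */
        }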
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5fea638743e4..23ab145daa2d 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
143 INIT_LIST_HEAD(&clp->cl_state_owners); 143 INIT_LIST_HEAD(&clp->cl_state_owners);
144 INIT_LIST_HEAD(&clp->cl_unused); 144 INIT_LIST_HEAD(&clp->cl_unused);
145 spin_lock_init(&clp->cl_lock); 145 spin_lock_init(&clp->cl_lock);
146 INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); 146 INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
147 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); 147 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
148 clp->cl_boot_time = CURRENT_TIME; 148 clp->cl_boot_time = CURRENT_TIME;
149 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; 149 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index ec1114b33d89..371b804e7cc8 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,10 +18,10 @@
18 18
19#define NFSDBG_FACILITY NFSDBG_VFS 19#define NFSDBG_FACILITY NFSDBG_VFS
20 20
21static void nfs_expire_automounts(void *list); 21static void nfs_expire_automounts(struct work_struct *work);
22 22
23LIST_HEAD(nfs_automount_list); 23LIST_HEAD(nfs_automount_list);
24static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list); 24static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
25int nfs_mountpoint_expiry_timeout = 500 * HZ; 25int nfs_mountpoint_expiry_timeout = 500 * HZ;
26 26
27static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, 27static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = {
164 .follow_link = nfs_follow_mountpoint, 164 .follow_link = nfs_follow_mountpoint,
165}; 165};
166 166
167static void nfs_expire_automounts(void *data) 167static void nfs_expire_automounts(struct work_struct *work)
168{ 168{
169 struct list_head *list = (struct list_head *)data; 169 struct list_head *list = &nfs_automount_list;
170 170
171 mark_mounts_for_expiry(list); 171 mark_mounts_for_expiry(list);
172 if (!list_empty(list)) 172 if (!list_empty(list))
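nfs's automount expiry is the file-scope variant: DECLARE_DELAYED_WORK() replaces the old three-argument DECLARE_WORK(name, fn, data), and because there is no per-work data pointer the handler simply uses the file-scope list it used to be handed. Sketch, with MY_TIMEOUT and the my_* names as placeholders:

        static void my_expire_automounts(struct work_struct *work);

        static LIST_HEAD(my_automount_list);
        static DECLARE_DELAYED_WORK(my_automount_task, my_expire_automounts);

        static void my_expire_automounts(struct work_struct *work)
        {
                struct list_head *list = &my_automount_list;    /* was passed as data */

                mark_mounts_for_expiry(list);
                if (!list_empty(list))
                        schedule_delayed_work(&my_automount_task, MY_TIMEOUT);
        }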
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f346677332d..c26cd978c7cc 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2];
185extern void nfs4_schedule_state_renewal(struct nfs_client *); 185extern void nfs4_schedule_state_renewal(struct nfs_client *);
186extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); 186extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
187extern void nfs4_kill_renewd(struct nfs_client *); 187extern void nfs4_kill_renewd(struct nfs_client *);
188extern void nfs4_renew_state(void *); 188extern void nfs4_renew_state(struct work_struct *);
189 189
190/* nfs4state.c */ 190/* nfs4state.c */
191struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); 191struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df1852e75..823298561c0a 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
59#define NFSDBG_FACILITY NFSDBG_PROC 59#define NFSDBG_FACILITY NFSDBG_PROC
60 60
61void 61void
62nfs4_renew_state(void *data) 62nfs4_renew_state(struct work_struct *work)
63{ 63{
64 struct nfs_client *clp = (struct nfs_client *)data; 64 struct nfs_client *clp =
65 container_of(work, struct nfs_client, cl_renewd.work);
65 struct rpc_cred *cred; 66 struct rpc_cred *cred;
66 long lease, timeout; 67 long lease, timeout;
67 unsigned long last, now; 68 unsigned long last, now;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 293b6495829f..e431e93ab503 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1829,9 +1829,8 @@ out:
1829} 1829}
1830 1830
1831static struct workqueue_struct *laundry_wq; 1831static struct workqueue_struct *laundry_wq;
1832static struct work_struct laundromat_work; 1832static void laundromat_main(struct work_struct *);
1833static void laundromat_main(void *); 1833static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
1834static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
1835 1834
1836__be32 1835__be32
1837nfsd4_renew(clientid_t *clid) 1836nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@ nfs4_laundromat(void)
1940} 1939}
1941 1940
1942void 1941void
1943laundromat_main(void *not_used) 1942laundromat_main(struct work_struct *not_used)
1944{ 1943{
1945 time_t t; 1944 time_t t;
1946 1945
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 85a048b7a67b..edc91ca3792a 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1197,10 +1197,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
1197 return status; 1197 return status;
1198} 1198}
1199 1199
1200static void ocfs2_truncate_log_worker(void *data) 1200static void ocfs2_truncate_log_worker(struct work_struct *work)
1201{ 1201{
1202 int status; 1202 int status;
1203 struct ocfs2_super *osb = data; 1203 struct ocfs2_super *osb =
1204 container_of(work, struct ocfs2_super,
1205 osb_truncate_log_wq.work);
1204 1206
1205 mlog_entry_void(); 1207 mlog_entry_void();
1206 1208
@@ -1432,7 +1434,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
1432 /* ocfs2_truncate_log_shutdown keys on the existence of 1434 /* ocfs2_truncate_log_shutdown keys on the existence of
1433 * osb->osb_tl_inode so we don't set any of the osb variables 1435 * osb->osb_tl_inode so we don't set any of the osb variables
1434 * until we're sure all is well. */ 1436 * until we're sure all is well. */
1435 INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb); 1437 INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
1438 ocfs2_truncate_log_worker);
1436 osb->osb_tl_bh = tl_bh; 1439 osb->osb_tl_bh = tl_bh;
1437 osb->osb_tl_inode = tl_inode; 1440 osb->osb_tl_inode = tl_inode;
1438 1441
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 305cba3681fe..4cd9a9580456 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -141,7 +141,7 @@ struct o2hb_region {
141 * recognizes a node going up and down in one iteration */ 141 * recognizes a node going up and down in one iteration */
142 u64 hr_generation; 142 u64 hr_generation;
143 143
144 struct work_struct hr_write_timeout_work; 144 struct delayed_work hr_write_timeout_work;
145 unsigned long hr_last_timeout_start; 145 unsigned long hr_last_timeout_start;
146 146
147 /* Used during o2hb_check_slot to hold a copy of the block 147 /* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt {
156 int wc_error; 156 int wc_error;
157}; 157};
158 158
159static void o2hb_write_timeout(void *arg) 159static void o2hb_write_timeout(struct work_struct *work)
160{ 160{
161 struct o2hb_region *reg = arg; 161 struct o2hb_region *reg =
162 container_of(work, struct o2hb_region,
163 hr_write_timeout_work.work);
162 164
163 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " 165 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
164 "milliseconds\n", reg->hr_dev_name, 166 "milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1404 goto out; 1406 goto out;
1405 } 1407 }
1406 1408
1407 INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg); 1409 INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
1408 1410
1409 /* 1411 /*
1410 * A node is considered live after it has beat LIVE_THRESHOLD 1412 * A node is considered live after it has beat LIVE_THRESHOLD
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 7bba98fbfc15..4705d659fe57 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -88,7 +88,7 @@ void o2quo_disk_timeout(void)
88 o2quo_fence_self(); 88 o2quo_fence_self();
89} 89}
90 90
91static void o2quo_make_decision(void *arg) 91static void o2quo_make_decision(struct work_struct *work)
92{ 92{
93 int quorum; 93 int quorum;
94 int lowest_hb, lowest_reachable = 0, fence = 0; 94 int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@ void o2quo_init(void)
306 struct o2quo_state *qs = &o2quo_state; 306 struct o2quo_state *qs = &o2quo_state;
307 307
308 spin_lock_init(&qs->qs_lock); 308 spin_lock_init(&qs->qs_lock);
309 INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL); 309 INIT_WORK(&qs->qs_work, o2quo_make_decision);
310} 310}
311 311
312void o2quo_exit(void) 312void o2quo_exit(void)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b650efa8c8be..9b3209dc0b16 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] =
140 [O2NET_ERR_DIED] = -EHOSTDOWN,}; 140 [O2NET_ERR_DIED] = -EHOSTDOWN,};
141 141
142/* can't quite avoid *all* internal declarations :/ */ 142/* can't quite avoid *all* internal declarations :/ */
143static void o2net_sc_connect_completed(void *arg); 143static void o2net_sc_connect_completed(struct work_struct *work);
144static void o2net_rx_until_empty(void *arg); 144static void o2net_rx_until_empty(struct work_struct *work);
145static void o2net_shutdown_sc(void *arg); 145static void o2net_shutdown_sc(struct work_struct *work);
146static void o2net_listen_data_ready(struct sock *sk, int bytes); 146static void o2net_listen_data_ready(struct sock *sk, int bytes);
147static void o2net_sc_send_keep_req(void *arg); 147static void o2net_sc_send_keep_req(struct work_struct *work);
148static void o2net_idle_timer(unsigned long data); 148static void o2net_idle_timer(unsigned long data);
149static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); 149static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
150 150
@@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
308 o2nm_node_get(node); 308 o2nm_node_get(node);
309 sc->sc_node = node; 309 sc->sc_node = node;
310 310
311 INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc); 311 INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
312 INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc); 312 INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
313 INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc); 313 INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
314 INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc); 314 INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
315 315
316 init_timer(&sc->sc_idle_timeout); 316 init_timer(&sc->sc_idle_timeout);
317 sc->sc_idle_timeout.function = o2net_idle_timer; 317 sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc,
342 sc_put(sc); 342 sc_put(sc);
343} 343}
344static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, 344static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
345 struct work_struct *work, 345 struct delayed_work *work,
346 int delay) 346 int delay)
347{ 347{
348 sc_get(sc); 348 sc_get(sc);
@@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
350 sc_put(sc); 350 sc_put(sc);
351} 351}
352static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, 352static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
353 struct work_struct *work) 353 struct delayed_work *work)
354{ 354{
355 if (cancel_delayed_work(work)) 355 if (cancel_delayed_work(work))
356 sc_put(sc); 356 sc_put(sc);
@@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn,
564 * ourselves as state_change couldn't get the nn_lock and call set_nn_state 564 * ourselves as state_change couldn't get the nn_lock and call set_nn_state
565 * itself. 565 * itself.
566 */ 566 */
567static void o2net_shutdown_sc(void *arg) 567static void o2net_shutdown_sc(struct work_struct *work)
568{ 568{
569 struct o2net_sock_container *sc = arg; 569 struct o2net_sock_container *sc =
570 container_of(work, struct o2net_sock_container,
571 sc_shutdown_work);
570 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 572 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
571 573
572 sclog(sc, "shutting down\n"); 574 sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@ out:
1201/* this work func is triggerd by data ready. it reads until it can read no 1203/* this work func is triggerd by data ready. it reads until it can read no
1202 * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing 1204 * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
1203 * our work the work struct will be marked and we'll be called again. */ 1205 * our work the work struct will be marked and we'll be called again. */
1204static void o2net_rx_until_empty(void *arg) 1206static void o2net_rx_until_empty(struct work_struct *work)
1205{ 1207{
1206 struct o2net_sock_container *sc = arg; 1208 struct o2net_sock_container *sc =
1209 container_of(work, struct o2net_sock_container, sc_rx_work);
1207 int ret; 1210 int ret;
1208 1211
1209 do { 1212 do {
@@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock)
1249 1252
1250/* called when a connect completes and after a sock is accepted. the 1253/* called when a connect completes and after a sock is accepted. the
1251 * rx path will see the response and mark the sc valid */ 1254 * rx path will see the response and mark the sc valid */
1252static void o2net_sc_connect_completed(void *arg) 1255static void o2net_sc_connect_completed(struct work_struct *work)
1253{ 1256{
1254 struct o2net_sock_container *sc = arg; 1257 struct o2net_sock_container *sc =
1258 container_of(work, struct o2net_sock_container,
1259 sc_connect_work);
1255 1260
1256 mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", 1261 mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
1257 (unsigned long long)O2NET_PROTOCOL_VERSION, 1262 (unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg)
1262} 1267}
1263 1268
1264/* this is called as a work_struct func. */ 1269/* this is called as a work_struct func. */
1265static void o2net_sc_send_keep_req(void *arg) 1270static void o2net_sc_send_keep_req(struct work_struct *work)
1266{ 1271{
1267 struct o2net_sock_container *sc = arg; 1272 struct o2net_sock_container *sc =
1273 container_of(work, struct o2net_sock_container,
1274 sc_keepalive_work.work);
1268 1275
1269 o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); 1276 o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
1270 sc_put(sc); 1277 sc_put(sc);
@@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
1314 * having a connect attempt fail, etc. This centralizes the logic which decides 1321 * having a connect attempt fail, etc. This centralizes the logic which decides
1315 * if a connect attempt should be made or if we should give up and all future 1322 * if a connect attempt should be made or if we should give up and all future
1316 * transmit attempts should fail */ 1323 * transmit attempts should fail */
1317static void o2net_start_connect(void *arg) 1324static void o2net_start_connect(struct work_struct *work)
1318{ 1325{
1319 struct o2net_node *nn = arg; 1326 struct o2net_node *nn =
1327 container_of(work, struct o2net_node, nn_connect_work.work);
1320 struct o2net_sock_container *sc = NULL; 1328 struct o2net_sock_container *sc = NULL;
1321 struct o2nm_node *node = NULL, *mynode = NULL; 1329 struct o2nm_node *node = NULL, *mynode = NULL;
1322 struct socket *sock = NULL; 1330 struct socket *sock = NULL;
1323 struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; 1331 struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
1324 int ret = 0; 1332 int ret = 0, stop;
1325 1333
1326 /* if we're greater we initiate tx, otherwise we accept */ 1334 /* if we're greater we initiate tx, otherwise we accept */
1327 if (o2nm_this_node() <= o2net_num_from_nn(nn)) 1335 if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg)
1342 1350
1343 spin_lock(&nn->nn_lock); 1351 spin_lock(&nn->nn_lock);
1344 /* see if we already have one pending or have given up */ 1352 /* see if we already have one pending or have given up */
1345 if (nn->nn_sc || nn->nn_persistent_error) 1353 stop = (nn->nn_sc || nn->nn_persistent_error);
1346 arg = NULL;
1347 spin_unlock(&nn->nn_lock); 1354 spin_unlock(&nn->nn_lock);
1348 if (arg == NULL) /* *shrug*, needed some indicator */ 1355 if (stop)
1349 goto out; 1356 goto out;
1350 1357
1351 nn->nn_last_connect_attempt = jiffies; 1358 nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@ out:
1421 return; 1428 return;
1422} 1429}
1423 1430
1424static void o2net_connect_expired(void *arg) 1431static void o2net_connect_expired(struct work_struct *work)
1425{ 1432{
1426 struct o2net_node *nn = arg; 1433 struct o2net_node *nn =
1434 container_of(work, struct o2net_node, nn_connect_expired.work);
1427 1435
1428 spin_lock(&nn->nn_lock); 1436 spin_lock(&nn->nn_lock);
1429 if (!nn->nn_sc_valid) { 1437 if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg)
1436 spin_unlock(&nn->nn_lock); 1444 spin_unlock(&nn->nn_lock);
1437} 1445}
1438 1446
1439static void o2net_still_up(void *arg) 1447static void o2net_still_up(struct work_struct *work)
1440{ 1448{
1441 struct o2net_node *nn = arg; 1449 struct o2net_node *nn =
1450 container_of(work, struct o2net_node, nn_still_up.work);
1442 1451
1443 o2quo_hb_still_up(o2net_num_from_nn(nn)); 1452 o2quo_hb_still_up(o2net_num_from_nn(nn));
1444} 1453}
@@ -1644,9 +1653,9 @@ out:
1644 return ret; 1653 return ret;
1645} 1654}
1646 1655
1647static void o2net_accept_many(void *arg) 1656static void o2net_accept_many(struct work_struct *work)
1648{ 1657{
1649 struct socket *sock = arg; 1658 struct socket *sock = o2net_listen_sock;
1650 while (o2net_accept_one(sock) == 0) 1659 while (o2net_accept_one(sock) == 0)
1651 cond_resched(); 1660 cond_resched();
1652} 1661}
@@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port)
1700 write_unlock_bh(&sock->sk->sk_callback_lock); 1709 write_unlock_bh(&sock->sk->sk_callback_lock);
1701 1710
1702 o2net_listen_sock = sock; 1711 o2net_listen_sock = sock;
1703 INIT_WORK(&o2net_listen_work, o2net_accept_many, sock); 1712 INIT_WORK(&o2net_listen_work, o2net_accept_many);
1704 1713
1705 sock->sk->sk_reuse = 1; 1714 sock->sk->sk_reuse = 1;
1706 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); 1715 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@ int o2net_init(void)
1819 struct o2net_node *nn = o2net_nn_from_num(i); 1828 struct o2net_node *nn = o2net_nn_from_num(i);
1820 1829
1821 spin_lock_init(&nn->nn_lock); 1830 spin_lock_init(&nn->nn_lock);
1822 INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn); 1831 INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
1823 INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn); 1832 INIT_DELAYED_WORK(&nn->nn_connect_expired,
1824 INIT_WORK(&nn->nn_still_up, o2net_still_up, nn); 1833 o2net_connect_expired);
1834 INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
1825 /* until we see hb from a node we'll return einval */ 1835 /* until we see hb from a node we'll return einval */
1826 nn->nn_persistent_error = -ENOTCONN; 1836 nn->nn_persistent_error = -ENOTCONN;
1827 init_waitqueue_head(&nn->nn_sc_wq); 1837 init_waitqueue_head(&nn->nn_sc_wq);
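
Two further points from the tcp.c conversion: helpers that arm or cancel per-object timers now take struct delayed_work * (the timer lives in the wrapper, not in work_struct), and the old trick of clearing the handler argument to signal "stop" is replaced by an ordinary local flag taken under the lock. A sketch of the helper side, using made-up names for the connection object and its refcounting:

#include <linux/workqueue.h>

struct my_conn {
	struct delayed_work keepalive_work;
};

extern struct workqueue_struct *my_wq;
extern void my_conn_get(struct my_conn *c);
extern void my_conn_put(struct my_conn *c);

static void my_conn_queue_delayed(struct my_conn *c,
				  struct delayed_work *dwork,
				  unsigned long delay)
{
	my_conn_get(c);
	/* queue_delayed_work() returns 0 if the item was already pending */
	if (!queue_delayed_work(my_wq, dwork, delay))
		my_conn_put(c);
}

static void my_conn_cancel_delayed(struct my_conn *c,
				   struct delayed_work *dwork)
{
	if (cancel_delayed_work(dwork))
		my_conn_put(c);
}

o2net_start_connect() applies the same idea to its early-exit test: the decision is captured in a local "stop" variable under nn_lock instead of nulling the now-absent argument.
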
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4b46aac7d243..daebbd3a2c8c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -86,18 +86,18 @@ struct o2net_node {
86 * connect attempt fails and so can be self-arming. shutdown is 86 * connect attempt fails and so can be self-arming. shutdown is
87 * careful to first mark the nn such that no connects will be attempted 87 * careful to first mark the nn such that no connects will be attempted
88 * before canceling delayed connect work and flushing the queue. */ 88 * before canceling delayed connect work and flushing the queue. */
89 struct work_struct nn_connect_work; 89 struct delayed_work nn_connect_work;
90 unsigned long nn_last_connect_attempt; 90 unsigned long nn_last_connect_attempt;
91 91
92 /* this is queued as nodes come up and is canceled when a connection is 92 /* this is queued as nodes come up and is canceled when a connection is
93 * established. this expiring gives up on the node and errors out 93 * established. this expiring gives up on the node and errors out
94 * transmits */ 94 * transmits */
95 struct work_struct nn_connect_expired; 95 struct delayed_work nn_connect_expired;
96 96
97 /* after we give up on a socket we wait a while before deciding 97 /* after we give up on a socket we wait a while before deciding
98 * that it is still heartbeating and that we should do some 98 * that it is still heartbeating and that we should do some
99 * quorum work */ 99 * quorum work */
100 struct work_struct nn_still_up; 100 struct delayed_work nn_still_up;
101}; 101};
102 102
103struct o2net_sock_container { 103struct o2net_sock_container {
@@ -129,7 +129,7 @@ struct o2net_sock_container {
129 struct work_struct sc_shutdown_work; 129 struct work_struct sc_shutdown_work;
130 130
131 struct timer_list sc_idle_timeout; 131 struct timer_list sc_idle_timeout;
132 struct work_struct sc_keepalive_work; 132 struct delayed_work sc_keepalive_work;
133 133
134 unsigned sc_handshake_ok:1; 134 unsigned sc_handshake_ok:1;
135 135
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index fa968180b072..6b6ff76538c5 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned
153 * called functions that cannot be directly called from the 153 * called functions that cannot be directly called from the
154 * net message handlers for some reason, usually because 154 * net message handlers for some reason, usually because
155 * they need to send net messages of their own. */ 155 * they need to send net messages of their own. */
156void dlm_dispatch_work(void *data); 156void dlm_dispatch_work(struct work_struct *work);
157 157
158struct dlm_lock_resource; 158struct dlm_lock_resource;
159struct dlm_work_item; 159struct dlm_work_item;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index f6cdab3a2c6a..420a375a3949 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1297,7 +1297,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1297 1297
1298 spin_lock_init(&dlm->work_lock); 1298 spin_lock_init(&dlm->work_lock);
1299 INIT_LIST_HEAD(&dlm->work_list); 1299 INIT_LIST_HEAD(&dlm->work_list);
1300 INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); 1300 INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
1301 1301
1302 kref_init(&dlm->dlm_refs); 1302 kref_init(&dlm->dlm_refs);
1303 dlm->dlm_state = DLM_CTXT_NEW; 1303 dlm->dlm_state = DLM_CTXT_NEW;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9d950d7cea38..fb3e2b0817f1 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
153} 153}
154 154
155/* Worker function used during recovery. */ 155/* Worker function used during recovery. */
156void dlm_dispatch_work(void *data) 156void dlm_dispatch_work(struct work_struct *work)
157{ 157{
158 struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; 158 struct dlm_ctxt *dlm =
159 container_of(work, struct dlm_ctxt, dispatched_work);
159 LIST_HEAD(tmp_list); 160 LIST_HEAD(tmp_list);
160 struct list_head *iter, *iter2; 161 struct list_head *iter, *iter2;
161 struct dlm_work_item *item; 162 struct dlm_work_item *item;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index eead48bbfac6..7d2f578b267d 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
171 BUG(); 171 BUG();
172} 172}
173 173
174static void user_dlm_unblock_lock(void *opaque); 174static void user_dlm_unblock_lock(struct work_struct *work);
175 175
176static void __user_dlm_queue_lockres(struct user_lock_res *lockres) 176static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
177{ 177{
178 if (!(lockres->l_flags & USER_LOCK_QUEUED)) { 178 if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
179 user_dlm_grab_inode_ref(lockres); 179 user_dlm_grab_inode_ref(lockres);
180 180
181 INIT_WORK(&lockres->l_work, user_dlm_unblock_lock, 181 INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
182 lockres);
183 182
184 queue_work(user_dlm_worker, &lockres->l_work); 183 queue_work(user_dlm_worker, &lockres->l_work);
185 lockres->l_flags |= USER_LOCK_QUEUED; 184 lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
279 iput(inode); 278 iput(inode);
280} 279}
281 280
282static void user_dlm_unblock_lock(void *opaque) 281static void user_dlm_unblock_lock(struct work_struct *work)
283{ 282{
284 int new_level, status; 283 int new_level, status;
285 struct user_lock_res *lockres = (struct user_lock_res *) opaque; 284 struct user_lock_res *lockres =
285 container_of(work, struct user_lock_res, l_work);
286 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); 286 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
287 287
288 mlog(0, "processing lockres %.*s\n", lockres->l_namelen, 288 mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
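
For plain, non-delayed items such as the DLM work above, the recovered pointer is simply the structure that embeds the work_struct. A short sketch of the userdlm-style pattern, with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_lockres {
	struct work_struct work;
	unsigned long flags;
};

extern struct workqueue_struct *my_worker;

static void my_unblock_lock(struct work_struct *work)
{
	struct my_lockres *lockres =
		container_of(work, struct my_lockres, work);

	/* ... downconvert or cancel the lock as required ... */
	printk(KERN_DEBUG "processing lockres %p\n", lockres);
}

static void my_queue_lockres(struct my_lockres *lockres)
{
	INIT_WORK(&lockres->work, my_unblock_lock);
	queue_work(my_worker, &lockres->work);
}
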
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index c0ad7cb59521..1d7f4ab1e5ed 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -703,11 +703,12 @@ struct ocfs2_la_recovery_item {
703 * NOTE: This function can and will sleep on recovery of other nodes 703 * NOTE: This function can and will sleep on recovery of other nodes
704 * during cluster locking, just like any other ocfs2 process. 704 * during cluster locking, just like any other ocfs2 process.
705 */ 705 */
706void ocfs2_complete_recovery(void *data) 706void ocfs2_complete_recovery(struct work_struct *work)
707{ 707{
708 int ret; 708 int ret;
709 struct ocfs2_super *osb = data; 709 struct ocfs2_journal *journal =
710 struct ocfs2_journal *journal = osb->journal; 710 container_of(work, struct ocfs2_journal, j_recovery_work);
711 struct ocfs2_super *osb = journal->j_osb;
711 struct ocfs2_dinode *la_dinode, *tl_dinode; 712 struct ocfs2_dinode *la_dinode, *tl_dinode;
712 struct ocfs2_la_recovery_item *item; 713 struct ocfs2_la_recovery_item *item;
713 struct list_head *p, *n; 714 struct list_head *p, *n;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index d86cb960b7ec..899112ad8136 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -133,7 +133,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb,
133} 133}
134 134
135/* Exported only for the journal struct init code in super.c. Do not call. */ 135/* Exported only for the journal struct init code in super.c. Do not call. */
136void ocfs2_complete_recovery(void *data); 136void ocfs2_complete_recovery(struct work_struct *work);
137 137
138/* 138/*
139 * Journal Control: 139 * Journal Control:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 078883772bd6..b767fd7da6eb 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -285,7 +285,7 @@ struct ocfs2_super
285 /* Truncate log info */ 285 /* Truncate log info */
286 struct inode *osb_tl_inode; 286 struct inode *osb_tl_inode;
287 struct buffer_head *osb_tl_bh; 287 struct buffer_head *osb_tl_bh;
288 struct work_struct osb_truncate_log_wq; 288 struct delayed_work osb_truncate_log_wq;
289 289
290 struct ocfs2_node_map osb_recovering_orphan_dirs; 290 struct ocfs2_node_map osb_recovering_orphan_dirs;
291 unsigned int *osb_orphan_wipes; 291 unsigned int *osb_orphan_wipes;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index b0992573dee2..d9b4214a12da 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1365 spin_lock_init(&journal->j_lock); 1365 spin_lock_init(&journal->j_lock);
1366 journal->j_trans_id = (unsigned long) 1; 1366 journal->j_trans_id = (unsigned long) 1;
1367 INIT_LIST_HEAD(&journal->j_la_cleanups); 1367 INIT_LIST_HEAD(&journal->j_la_cleanups);
1368 INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb); 1368 INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
1369 journal->j_state = OCFS2_JOURNAL_FREE; 1369 journal->j_state = OCFS2_JOURNAL_FREE;
1370 1370
1371 /* get some pseudo constants for clustersize bits */ 1371 /* get some pseudo constants for clustersize bits */
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ac93174c9639..7280a23ef344 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super,
104 struct reiserfs_journal *journal); 104 struct reiserfs_journal *journal);
105static int dirty_one_transaction(struct super_block *s, 105static int dirty_one_transaction(struct super_block *s,
106 struct reiserfs_journal_list *jl); 106 struct reiserfs_journal_list *jl);
107static void flush_async_commits(void *p); 107static void flush_async_commits(struct work_struct *work);
108static void queue_log_writer(struct super_block *s); 108static void queue_log_writer(struct super_block *s);
109 109
110/* values for join in do_journal_begin_r */ 110/* values for join in do_journal_begin_r */
@@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2836 if (reiserfs_mounted_fs_count <= 1) 2836 if (reiserfs_mounted_fs_count <= 1)
2837 commit_wq = create_workqueue("reiserfs"); 2837 commit_wq = create_workqueue("reiserfs");
2838 2838
2839 INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb); 2839 INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2840 journal->j_work_sb = p_s_sb;
2840 return 0; 2841 return 0;
2841 free_and_return: 2842 free_and_return:
2842 free_journal_ram(p_s_sb); 2843 free_journal_ram(p_s_sb);
@@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th,
3447/* 3448/*
3448** writeback the pending async commits to disk 3449** writeback the pending async commits to disk
3449*/ 3450*/
3450static void flush_async_commits(void *p) 3451static void flush_async_commits(struct work_struct *work)
3451{ 3452{
3452 struct super_block *p_s_sb = p; 3453 struct reiserfs_journal *journal =
3453 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3454 container_of(work, struct reiserfs_journal, j_work.work);
3455 struct super_block *p_s_sb = journal->j_work_sb;
3454 struct reiserfs_journal_list *jl; 3456 struct reiserfs_journal_list *jl;
3455 struct list_head *entry; 3457 struct list_head *entry;
3456 3458
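
Both journal conversions above illustrate the awkward case: the old data pointer was not the structure that embeds the work item. container_of() only reaches the embedding structure, so the real target must be reachable from it, either through an existing back pointer (ocfs2's journal->j_osb) or through a new field stored at init time (reiserfs's j_work_sb). A sketch of the second approach, with invented types:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_super;			/* details not needed for the sketch */

struct my_journal {
	struct delayed_work work;
	struct my_super *work_sb;	/* stashed next to the work item */
};

static void my_flush_async_commits(struct work_struct *work)
{
	struct my_journal *journal =
		container_of(work, struct my_journal, work.work);
	struct my_super *sb = journal->work_sb;

	if (!sb)
		return;
	/* ... write back pending async commits for sb ... */
}

static void my_journal_init(struct my_journal *journal, struct my_super *sb)
{
	INIT_DELAYED_WORK(&journal->work, my_flush_async_commits);
	journal->work_sb = sb;
}
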
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 09360cf1e1f2..8e6b56fc1cad 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -149,9 +149,10 @@ xfs_destroy_ioend(
149 */ 149 */
150STATIC void 150STATIC void
151xfs_end_bio_delalloc( 151xfs_end_bio_delalloc(
152 void *data) 152 struct work_struct *work)
153{ 153{
154 xfs_ioend_t *ioend = data; 154 xfs_ioend_t *ioend =
155 container_of(work, xfs_ioend_t, io_work);
155 156
156 xfs_destroy_ioend(ioend); 157 xfs_destroy_ioend(ioend);
157} 158}
@@ -161,9 +162,10 @@ xfs_end_bio_delalloc(
161 */ 162 */
162STATIC void 163STATIC void
163xfs_end_bio_written( 164xfs_end_bio_written(
164 void *data) 165 struct work_struct *work)
165{ 166{
166 xfs_ioend_t *ioend = data; 167 xfs_ioend_t *ioend =
168 container_of(work, xfs_ioend_t, io_work);
167 169
168 xfs_destroy_ioend(ioend); 170 xfs_destroy_ioend(ioend);
169} 171}
@@ -176,9 +178,10 @@ xfs_end_bio_written(
176 */ 178 */
177STATIC void 179STATIC void
178xfs_end_bio_unwritten( 180xfs_end_bio_unwritten(
179 void *data) 181 struct work_struct *work)
180{ 182{
181 xfs_ioend_t *ioend = data; 183 xfs_ioend_t *ioend =
184 container_of(work, xfs_ioend_t, io_work);
182 bhv_vnode_t *vp = ioend->io_vnode; 185 bhv_vnode_t *vp = ioend->io_vnode;
183 xfs_off_t offset = ioend->io_offset; 186 xfs_off_t offset = ioend->io_offset;
184 size_t size = ioend->io_size; 187 size_t size = ioend->io_size;
@@ -220,11 +223,11 @@ xfs_alloc_ioend(
220 ioend->io_size = 0; 223 ioend->io_size = 0;
221 224
222 if (type == IOMAP_UNWRITTEN) 225 if (type == IOMAP_UNWRITTEN)
223 INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); 226 INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
224 else if (type == IOMAP_DELAY) 227 else if (type == IOMAP_DELAY)
225 INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend); 228 INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
226 else 229 else
227 INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend); 230 INIT_WORK(&ioend->io_work, xfs_end_bio_written);
228 231
229 return ioend; 232 return ioend;
230} 233}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d3382843698e..eef4a0ba11e9 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -994,9 +994,10 @@ xfs_buf_wait_unpin(
994 994
995STATIC void 995STATIC void
996xfs_buf_iodone_work( 996xfs_buf_iodone_work(
997 void *v) 997 struct work_struct *work)
998{ 998{
999 xfs_buf_t *bp = (xfs_buf_t *)v; 999 xfs_buf_t *bp =
1000 container_of(work, xfs_buf_t, b_iodone_work);
1000 1001
1001 if (bp->b_iodone) 1002 if (bp->b_iodone)
1002 (*(bp->b_iodone))(bp); 1003 (*(bp->b_iodone))(bp);
@@ -1017,10 +1018,10 @@ xfs_buf_ioend(
1017 1018
1018 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { 1019 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1019 if (schedule) { 1020 if (schedule) {
1020 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); 1021 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1021 queue_work(xfslogd_workqueue, &bp->b_iodone_work); 1022 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1022 } else { 1023 } else {
1023 xfs_buf_iodone_work(bp); 1024 xfs_buf_iodone_work(&bp->b_iodone_work);
1024 } 1025 }
1025 } else { 1026 } else {
1026 up(&bp->b_iodonesema); 1027 up(&bp->b_iodonesema);
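
xfs_buf_ioend() also shows what happens when a handler is sometimes invoked synchronously rather than queued: the direct call must now pass the address of the embedded work_struct so that container_of() in the handler still finds the right buffer. Roughly, with placeholder names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_buf {
	struct work_struct iodone_work;
	void (*iodone)(struct my_buf *);
};

extern struct workqueue_struct *my_iodone_wq;

static void my_buf_iodone_work(struct work_struct *work)
{
	struct my_buf *bp = container_of(work, struct my_buf, iodone_work);

	if (bp->iodone)
		bp->iodone(bp);
}

static void my_buf_ioend(struct my_buf *bp, int schedule)
{
	if (schedule) {
		INIT_WORK(&bp->iodone_work, my_buf_iodone_work);
		queue_work(my_iodone_wq, &bp->iodone_work);
	} else {
		/* direct call: hand over the work_struct, not the buffer */
		my_buf_iodone_work(&bp->iodone_work);
	}
}
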
diff --git a/include/asm-arm/arch-omap/irda.h b/include/asm-arm/arch-omap/irda.h
index 805ae3575e44..345a649ec838 100644
--- a/include/asm-arm/arch-omap/irda.h
+++ b/include/asm-arm/arch-omap/irda.h
@@ -24,7 +24,7 @@ struct omap_irda_config {
24 /* Very specific to the needs of some platforms (h3,h4) 24 /* Very specific to the needs of some platforms (h3,h4)
25 * having calls which can sleep in irda_set_speed. 25 * having calls which can sleep in irda_set_speed.
26 */ 26 */
27 struct work_struct gpio_expa; 27 struct delayed_work gpio_expa;
28 int rx_channel; 28 int rx_channel;
29 int tx_channel; 29 int tx_channel;
30 unsigned long dest_start; 30 unsigned long dest_start;
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index 45e7a2fd1689..7b8f874f8429 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -86,5 +86,6 @@ extern void (*mach_disable_irq)(unsigned int);
86#define enable_irq(x) do { } while (0) 86#define enable_irq(x) do { } while (0)
87#define disable_irq(x) do { } while (0) 87#define disable_irq(x) do { } while (0)
88#define disable_irq_nosync(x) disable_irq(x) 88#define disable_irq_nosync(x) disable_irq(x)
89#define irq_canonicalize(irq) (irq)
89 90
90#endif /* _M68K_IRQ_H_ */ 91#endif /* _M68K_IRQ_H_ */
diff --git a/include/asm-m68knommu/rtc.h b/include/asm-m68knommu/rtc.h
new file mode 100644
index 000000000000..eaf18ec83c8e
--- /dev/null
+++ b/include/asm-m68knommu/rtc.h
@@ -0,0 +1 @@
#include <asm-m68k/rtc.h>
diff --git a/include/asm-m68knommu/ucontext.h b/include/asm-m68knommu/ucontext.h
index 5d570cedbb02..713a27f901cd 100644
--- a/include/asm-m68knommu/ucontext.h
+++ b/include/asm-m68knommu/ucontext.h
@@ -5,21 +5,17 @@ typedef int greg_t;
5#define NGREG 18 5#define NGREG 18
6typedef greg_t gregset_t[NGREG]; 6typedef greg_t gregset_t[NGREG];
7 7
8#ifdef CONFIG_FPU
9typedef struct fpregset { 8typedef struct fpregset {
10 int f_pcr; 9 int f_pcr;
11 int f_psr; 10 int f_psr;
12 int f_fpiaddr; 11 int f_fpiaddr;
13 int f_fpregs[8][3]; 12 int f_fpregs[8][3];
14} fpregset_t; 13} fpregset_t;
15#endif
16 14
17struct mcontext { 15struct mcontext {
18 int version; 16 int version;
19 gregset_t gregs; 17 gregset_t gregs;
20#ifdef CONFIG_FPU
21 fpregset_t fpregs; 18 fpregset_t fpregs;
22#endif
23}; 19};
24 20
25#define MCONTEXT_VERSION 2 21#define MCONTEXT_VERSION 2
@@ -29,9 +25,7 @@ struct ucontext {
29 struct ucontext *uc_link; 25 struct ucontext *uc_link;
30 stack_t uc_stack; 26 stack_t uc_stack;
31 struct mcontext uc_mcontext; 27 struct mcontext uc_mcontext;
32#ifdef CONFIG_FPU
33 unsigned long uc_filler[80]; 28 unsigned long uc_filler[80];
34#endif
35 sigset_t uc_sigmask; /* mask last for extensibility */ 29 sigset_t uc_sigmask; /* mask last for extensibility */
36}; 30};
37 31
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 0d71c0041f13..9e350fd44d77 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -194,7 +194,7 @@ struct kioctx {
194 194
195 struct aio_ring_info ring_info; 195 struct aio_ring_info ring_info;
196 196
197 struct work_struct wq; 197 struct delayed_work wq;
198}; 198};
199 199
200/* prototypes */ 200/* prototypes */
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 4c02119c6ab9..3ea1cd58de97 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -133,7 +133,7 @@ struct cn_callback_data {
133struct cn_callback_entry { 133struct cn_callback_entry {
134 struct list_head callback_entry; 134 struct list_head callback_entry;
135 struct cn_callback *cb; 135 struct cn_callback *cb;
136 struct work_struct work; 136 struct delayed_work work;
137 struct cn_queue_dev *pdev; 137 struct cn_queue_dev *pdev;
138 138
139 struct cn_callback_id id; 139 struct cn_callback_id id;
@@ -170,7 +170,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev);
170 170
171int cn_cb_equal(struct cb_id *, struct cb_id *); 171int cn_cb_equal(struct cb_id *, struct cb_id *);
172 172
173void cn_queue_wrapper(void *data); 173void cn_queue_wrapper(struct work_struct *work);
174 174
175extern int cn_already_initialized; 175extern int cn_already_initialized;
176 176
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index c115e9e840b4..1fb02e17f6f6 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -461,7 +461,7 @@ struct i2o_driver {
461 int (*reply) (struct i2o_controller *, u32, struct i2o_message *); 461 int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
462 462
463 /* Event handler */ 463 /* Event handler */
464 void (*event) (struct i2o_event *); 464 work_func_t event;
465 465
466 struct workqueue_struct *event_queue; /* Event queue */ 466 struct workqueue_struct *event_queue; /* Event queue */
467 467
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index efe0ee4cc80b..06c58c423fe1 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
158 if (t->buf.tail != NULL) 158 if (t->buf.tail != NULL)
159 t->buf.tail->commit = t->buf.tail->used; 159 t->buf.tail->commit = t->buf.tail->used;
160 spin_unlock_irqrestore(&t->buf.lock, flags); 160 spin_unlock_irqrestore(&t->buf.lock, flags);
161 schedule_work(&t->buf.work); 161 schedule_delayed_work(&t->buf.work, 0);
162} 162}
163 163
164#endif 164#endif
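
Once a field becomes a struct delayed_work, call sites that used schedule_work() for an immediate run switch to schedule_delayed_work() with a zero delay, as con_schedule_flip() does above. In sketch form (names invented):

#include <linux/workqueue.h>

struct my_bufhead {
	struct delayed_work work;
};

static inline void my_schedule_flip(struct my_bufhead *buf)
{
	schedule_delayed_work(&buf->work, 0);	/* run as soon as possible */
}
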
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 202283b5df96..ab2754830322 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -575,8 +575,9 @@ struct ata_port {
575 struct ata_host *host; 575 struct ata_host *host;
576 struct device *dev; 576 struct device *dev;
577 577
578 struct work_struct port_task; 578 void *port_task_data;
579 struct work_struct hotplug_task; 579 struct delayed_work port_task;
580 struct delayed_work hotplug_task;
580 struct work_struct scsi_rescan_task; 581 struct work_struct scsi_rescan_task;
581 582
582 unsigned int hsm_task_state; 583 unsigned int hsm_task_state;
@@ -755,7 +756,7 @@ extern void ata_host_resume(struct ata_host *host);
755extern int ata_ratelimit(void); 756extern int ata_ratelimit(void);
756extern int ata_busy_sleep(struct ata_port *ap, 757extern int ata_busy_sleep(struct ata_port *ap,
757 unsigned long timeout_pat, unsigned long timeout); 758 unsigned long timeout_pat, unsigned long timeout);
758extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), 759extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
759 void *data, unsigned long delay); 760 void *data, unsigned long delay);
760extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 761extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
761 unsigned long interval_msec, 762 unsigned long interval_msec,
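
libata keeps an API that lets callers pass both a handler and an argument; since the work item can no longer carry the argument, it moves into a new port_task_data field and the function type becomes work_func_t. Something along these lines, with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_port {
	void *task_data;		/* replaces the old INIT_WORK() data argument */
	struct delayed_work task;
};

extern struct workqueue_struct *my_port_wq;

static void my_port_queue_task(struct my_port *ap, work_func_t fn,
			       void *data, unsigned long delay)
{
	PREPARE_DELAYED_WORK(&ap->task, fn);
	ap->task_data = data;
	queue_delayed_work(my_port_wq, &ap->task, delay);
}

static void my_port_task_handler(struct work_struct *work)
{
	struct my_port *ap = container_of(work, struct my_port, task.work);

	/* the extra argument now travels in the owning structure */
	printk(KERN_DEBUG "port task data %p\n", ap->task_data);
}
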
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 528e7d3fecb1..c15ae1986b98 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -110,7 +110,7 @@ struct mmc_host {
110 struct mmc_card *card_busy; /* the MMC card claiming host */ 110 struct mmc_card *card_busy; /* the MMC card claiming host */
111 struct mmc_card *card_selected; /* the selected MMC card */ 111 struct mmc_card *card_selected; /* the selected MMC card */
112 112
113 struct work_struct detect; 113 struct delayed_work detect;
114 114
115 unsigned long private[0] ____cacheline_aligned; 115 unsigned long private[0] ____cacheline_aligned;
116}; 116};
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index b089d9506283..a503052138bd 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -127,10 +127,10 @@ struct ncp_server {
127 } unexpected_packet; 127 } unexpected_packet;
128}; 128};
129 129
130extern void ncp_tcp_rcv_proc(void *server); 130extern void ncp_tcp_rcv_proc(struct work_struct *work);
131extern void ncp_tcp_tx_proc(void *server); 131extern void ncp_tcp_tx_proc(struct work_struct *work);
132extern void ncpdgram_rcv_proc(void *server); 132extern void ncpdgram_rcv_proc(struct work_struct *work);
133extern void ncpdgram_timeout_proc(void *server); 133extern void ncpdgram_timeout_proc(struct work_struct *work);
134extern void ncpdgram_timeout_call(unsigned long server); 134extern void ncpdgram_timeout_call(unsigned long server);
135extern void ncp_tcp_data_ready(struct sock* sk, int len); 135extern void ncp_tcp_data_ready(struct sock* sk, int len);
136extern void ncp_tcp_write_space(struct sock* sk); 136extern void ncp_tcp_write_space(struct sock* sk);
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 2cc9867b1626..29930b71a9aa 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -32,7 +32,7 @@ struct netpoll_info {
32 struct netpoll *rx_np; /* netpoll that registered an rx_hook */ 32 struct netpoll *rx_np; /* netpoll that registered an rx_hook */
33 struct sk_buff_head arp_tx; /* list of arp requests to reply to */ 33 struct sk_buff_head arp_tx; /* list of arp requests to reply to */
34 struct sk_buff_head txq; 34 struct sk_buff_head txq;
35 struct work_struct tx_work; 35 struct delayed_work tx_work;
36}; 36};
37 37
38void netpoll_poll(struct netpoll *np); 38void netpoll_poll(struct netpoll *np);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7ccfc7ef0a83..95796e6924f1 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -51,7 +51,7 @@ struct nfs_client {
51 51
52 unsigned long cl_lease_time; 52 unsigned long cl_lease_time;
53 unsigned long cl_last_renewal; 53 unsigned long cl_last_renewal;
54 struct work_struct cl_renewd; 54 struct delayed_work cl_renewd;
55 55
56 struct rpc_wait_queue cl_rpcwaitq; 56 struct rpc_wait_queue cl_rpcwaitq;
57 57
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 62a7169aed8b..3a28742d86f9 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -249,7 +249,8 @@ struct reiserfs_journal {
249 int j_errno; 249 int j_errno;
250 250
251 /* when flushing ordered buffers, throttle new ordered writers */ 251 /* when flushing ordered buffers, throttle new ordered writers */
252 struct work_struct j_work; 252 struct delayed_work j_work;
253 struct super_block *j_work_sb;
253 atomic_t j_async_throttle; 254 atomic_t j_async_throttle;
254}; 255};
255 256
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 24accb483849..0e3d91b76996 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -38,7 +38,7 @@ struct rchan_buf
38 size_t subbufs_consumed; /* count of sub-buffers consumed */ 38 size_t subbufs_consumed; /* count of sub-buffers consumed */
39 struct rchan *chan; /* associated channel */ 39 struct rchan *chan; /* associated channel */
40 wait_queue_head_t read_wait; /* reader wait queue */ 40 wait_queue_head_t read_wait; /* reader wait queue */
41 struct work_struct wake_readers; /* reader wake-up work struct */ 41 struct delayed_work wake_readers; /* reader wake-up work struct */
42 struct dentry *dentry; /* channel file dentry */ 42 struct dentry *dentry; /* channel file dentry */
43 struct kref kref; /* channel buffer refcount */ 43 struct kref kref; /* channel buffer refcount */
44 struct page **page_array; /* array of current buffer pages */ 44 struct page **page_array; /* array of current buffer pages */
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index a2eb9b4a9de3..4a68125b6de6 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -30,7 +30,7 @@ struct rpc_inode {
30#define RPC_PIPE_WAIT_FOR_OPEN 1 30#define RPC_PIPE_WAIT_FOR_OPEN 1
31 int flags; 31 int flags;
32 struct rpc_pipe_ops *ops; 32 struct rpc_pipe_ops *ops;
33 struct work_struct queue_timeout; 33 struct delayed_work queue_timeout;
34}; 34};
35 35
36static inline struct rpc_inode * 36static inline struct rpc_inode *
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 60394fbc4c70..3e04c1512fc4 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -177,7 +177,7 @@ struct rpc_xprt {
177 unsigned long connect_timeout, 177 unsigned long connect_timeout,
178 bind_timeout, 178 bind_timeout,
179 reestablish_timeout; 179 reestablish_timeout;
180 struct work_struct connect_worker; 180 struct delayed_work connect_worker;
181 unsigned short port; 181 unsigned short port;
182 182
183 /* 183 /*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 65321f911c1e..f717f0898238 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -53,7 +53,7 @@ struct tty_buffer {
53}; 53};
54 54
55struct tty_bufhead { 55struct tty_bufhead {
56 struct work_struct work; 56 struct delayed_work work;
57 struct semaphore pty_sem; 57 struct semaphore pty_sem;
58 spinlock_t lock; 58 spinlock_t lock;
59 struct tty_buffer *head; /* Queue head */ 59 struct tty_buffer *head; /* Queue head */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 0cd73edeef13..aab5b1b72021 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -388,7 +388,7 @@ struct usb_device {
388 388
389 int pm_usage_cnt; /* usage counter for autosuspend */ 389 int pm_usage_cnt; /* usage counter for autosuspend */
390#ifdef CONFIG_PM 390#ifdef CONFIG_PM
391 struct work_struct autosuspend; /* for delayed autosuspends */ 391 struct delayed_work autosuspend; /* for delayed autosuspends */
392 struct mutex pm_mutex; /* protects PM operations */ 392 struct mutex pm_mutex; /* protects PM operations */
393 393
394 unsigned auto_pm:1; /* autosuspend/resume in progress */ 394 unsigned auto_pm:1; /* autosuspend/resume in progress */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9bca3539a1e5..4a3ea83c6d16 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,12 +11,23 @@
11 11
12struct workqueue_struct; 12struct workqueue_struct;
13 13
14struct work_struct;
15typedef void (*work_func_t)(struct work_struct *work);
16
14struct work_struct { 17struct work_struct {
15 unsigned long pending; 18 /* the first word is the work queue pointer and the flags rolled into
19 * one */
20 unsigned long management;
21#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
22#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */
23#define WORK_STRUCT_FLAG_MASK (3UL)
24#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
16 struct list_head entry; 25 struct list_head entry;
17 void (*func)(void *); 26 work_func_t func;
18 void *data; 27};
19 void *wq_data; 28
29struct delayed_work {
30 struct work_struct work;
20 struct timer_list timer; 31 struct timer_list timer;
21}; 32};
22 33
@@ -24,36 +35,117 @@ struct execute_work {
24 struct work_struct work; 35 struct work_struct work;
25}; 36};
26 37
27#define __WORK_INITIALIZER(n, f, d) { \ 38#define __WORK_INITIALIZER(n, f) { \
39 .management = 0, \
40 .entry = { &(n).entry, &(n).entry }, \
41 .func = (f), \
42 }
43
44#define __WORK_INITIALIZER_NAR(n, f) { \
45 .management = (1 << WORK_STRUCT_NOAUTOREL), \
28 .entry = { &(n).entry, &(n).entry }, \ 46 .entry = { &(n).entry, &(n).entry }, \
29 .func = (f), \ 47 .func = (f), \
30 .data = (d), \ 48 }
49
50#define __DELAYED_WORK_INITIALIZER(n, f) { \
51 .work = __WORK_INITIALIZER((n).work, (f)), \
52 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
53 }
54
55#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \
56 .work = __WORK_INITIALIZER_NAR((n).work, (f)), \
31 .timer = TIMER_INITIALIZER(NULL, 0, 0), \ 57 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
32 } 58 }
33 59
34#define DECLARE_WORK(n, f, d) \ 60#define DECLARE_WORK(n, f) \
35 struct work_struct n = __WORK_INITIALIZER(n, f, d) 61 struct work_struct n = __WORK_INITIALIZER(n, f)
62
63#define DECLARE_WORK_NAR(n, f) \
64 struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
65
66#define DECLARE_DELAYED_WORK(n, f) \
67 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
68
69#define DECLARE_DELAYED_WORK_NAR(n, f) \
 70 struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
36 71
37/* 72/*
38 * initialize a work-struct's func and data pointers: 73 * initialize a work item's function pointer
39 */ 74 */
40#define PREPARE_WORK(_work, _func, _data) \ 75#define PREPARE_WORK(_work, _func) \
41 do { \ 76 do { \
42 (_work)->func = _func; \ 77 (_work)->func = (_func); \
43 (_work)->data = _data; \
44 } while (0) 78 } while (0)
45 79
80#define PREPARE_DELAYED_WORK(_work, _func) \
81 PREPARE_WORK(&(_work)->work, (_func))
82
46/* 83/*
47 * initialize all of a work-struct: 84 * initialize all of a work item in one go
48 */ 85 */
49#define INIT_WORK(_work, _func, _data) \ 86#define INIT_WORK(_work, _func) \
50 do { \ 87 do { \
88 (_work)->management = 0; \
51 INIT_LIST_HEAD(&(_work)->entry); \ 89 INIT_LIST_HEAD(&(_work)->entry); \
52 (_work)->pending = 0; \ 90 PREPARE_WORK((_work), (_func)); \
53 PREPARE_WORK((_work), (_func), (_data)); \ 91 } while (0)
92
93#define INIT_WORK_NAR(_work, _func) \
94 do { \
95 (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \
96 INIT_LIST_HEAD(&(_work)->entry); \
97 PREPARE_WORK((_work), (_func)); \
98 } while (0)
99
100#define INIT_DELAYED_WORK(_work, _func) \
101 do { \
102 INIT_WORK(&(_work)->work, (_func)); \
103 init_timer(&(_work)->timer); \
104 } while (0)
105
106#define INIT_DELAYED_WORK_NAR(_work, _func) \
107 do { \
108 INIT_WORK_NAR(&(_work)->work, (_func)); \
54 init_timer(&(_work)->timer); \ 109 init_timer(&(_work)->timer); \
55 } while (0) 110 } while (0)
56 111
112/**
113 * work_pending - Find out whether a work item is currently pending
114 * @work: The work item in question
115 */
116#define work_pending(work) \
117 test_bit(WORK_STRUCT_PENDING, &(work)->management)
118
119/**
120 * delayed_work_pending - Find out whether a delayable work item is currently
121 * pending
122 * @work: The work item in question
123 */
124#define delayed_work_pending(work) \
125 test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
126
127/**
128 * work_release - Release a work item under execution
129 * @work: The work item to release
130 *
131 * This is used to release a work item that has been initialised with automatic
132 * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
133 * function the opportunity to grab auxiliary data from the container of the
134 * work_struct before clearing the pending bit as the work_struct may be
135 * subject to deallocation the moment the pending bit is cleared.
136 *
137 * In such a case, this should be called in the work function after it has
 138 * fetched any data it may require from the container of the work_struct.
139 * After this function has been called, the work_struct may be scheduled for
140 * further execution or it may be deallocated unless other precautions are
141 * taken.
142 *
143 * This should also be used to release a delayed work item.
144 */
145#define work_release(work) \
146 clear_bit(WORK_STRUCT_PENDING, &(work)->management)
147
148
57extern struct workqueue_struct *__create_workqueue(const char *name, 149extern struct workqueue_struct *__create_workqueue(const char *name,
58 int singlethread); 150 int singlethread);
59#define create_workqueue(name) __create_workqueue((name), 0) 151#define create_workqueue(name) __create_workqueue((name), 0)
@@ -62,39 +154,38 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
62extern void destroy_workqueue(struct workqueue_struct *wq); 154extern void destroy_workqueue(struct workqueue_struct *wq);
63 155
64extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); 156extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
65extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)); 157extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
66extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 158extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
67 struct work_struct *work, unsigned long delay); 159 struct delayed_work *work, unsigned long delay);
68extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); 160extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
69 161
70extern int FASTCALL(schedule_work(struct work_struct *work)); 162extern int FASTCALL(schedule_work(struct work_struct *work));
71extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay)); 163extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
72 164
73extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay); 165extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
74extern int schedule_on_each_cpu(void (*func)(void *info), void *info); 166extern int schedule_on_each_cpu(work_func_t func);
75extern void flush_scheduled_work(void); 167extern void flush_scheduled_work(void);
76extern int current_is_keventd(void); 168extern int current_is_keventd(void);
77extern int keventd_up(void); 169extern int keventd_up(void);
78 170
79extern void init_workqueues(void); 171extern void init_workqueues(void);
80void cancel_rearming_delayed_work(struct work_struct *work); 172void cancel_rearming_delayed_work(struct delayed_work *work);
81void cancel_rearming_delayed_workqueue(struct workqueue_struct *, 173void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
82 struct work_struct *); 174 struct delayed_work *);
83int execute_in_process_context(void (*fn)(void *), void *, 175int execute_in_process_context(work_func_t fn, struct execute_work *);
84 struct execute_work *);
85 176
86/* 177/*
87 * Kill off a pending schedule_delayed_work(). Note that the work callback 178 * Kill off a pending schedule_delayed_work(). Note that the work callback
88 * function may still be running on return from cancel_delayed_work(). Run 179 * function may still be running on return from cancel_delayed_work(). Run
89 * flush_scheduled_work() to wait on it. 180 * flush_scheduled_work() to wait on it.
90 */ 181 */
91static inline int cancel_delayed_work(struct work_struct *work) 182static inline int cancel_delayed_work(struct delayed_work *work)
92{ 183{
93 int ret; 184 int ret;
94 185
95 ret = del_timer_sync(&work->timer); 186 ret = del_timer_sync(&work->timer);
96 if (ret) 187 if (ret)
97 clear_bit(0, &work->pending); 188 clear_bit(WORK_STRUCT_PENDING, &work->work.management);
98 return ret; 189 return ret;
99} 190}
100 191
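
To summarise the new interface that the rest of this patch converts to: work items no longer carry a data pointer, delayed work is a distinct type wrapping a work_struct and a timer, and the declaration/init macros lose their third argument. A compact usage sketch (handlers and names are illustrative only):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_handler(struct work_struct *work);
static void my_delayed_handler(struct work_struct *work);

static DECLARE_WORK(my_work, my_handler);
static DECLARE_DELAYED_WORK(my_dwork, my_delayed_handler);

static void my_handler(struct work_struct *work)
{
	/* plain item: no data pointer, nothing to release */
}

static void my_delayed_handler(struct work_struct *work)
{
	/* here 'work' is &my_dwork.work */
}

static void my_example(void)
{
	schedule_work(&my_work);
	schedule_delayed_work(&my_dwork, HZ);

	if (delayed_work_pending(&my_dwork))
		cancel_delayed_work(&my_dwork);
}

Items initialised with the _NAR variants keep WORK_STRUCT_PENDING set until the handler itself calls work_release(), once it has finished reading its container, as the comment block above describes.
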
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index 617b672b1132..89119277553d 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -108,8 +108,8 @@ struct ieee80211softmac_assoc_info {
108 /* Scan retries remaining */ 108 /* Scan retries remaining */
109 int scan_retry; 109 int scan_retry;
110 110
111 struct work_struct work; 111 struct delayed_work work;
112 struct work_struct timeout; 112 struct delayed_work timeout;
113}; 113};
114 114
115struct ieee80211softmac_bss_info { 115struct ieee80211softmac_bss_info {
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 5f48748fe017..f7be1ac73601 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -84,7 +84,7 @@ struct inet_timewait_death_row {
84}; 84};
85 85
86extern void inet_twdr_hangman(unsigned long data); 86extern void inet_twdr_hangman(unsigned long data);
87extern void inet_twdr_twkill_work(void *data); 87extern void inet_twdr_twkill_work(struct work_struct *work);
88extern void inet_twdr_twcal_tick(unsigned long data); 88extern void inet_twdr_twcal_tick(unsigned long data);
89 89
90#if (BITS_PER_LONG == 64) 90#if (BITS_PER_LONG == 64)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f8cbe40f52c0..c089f93ba591 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1030,7 +1030,7 @@ void sctp_inq_init(struct sctp_inq *);
1030void sctp_inq_free(struct sctp_inq *); 1030void sctp_inq_free(struct sctp_inq *);
1031void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); 1031void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
1032struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); 1032struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
1033void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *); 1033void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
1034 1034
1035/* This is the structure we use to hold outbound chunks. You push 1035/* This is the structure we use to hold outbound chunks. You push
1036 * chunks in and they automatically pop out the other end as bundled 1036 * chunks in and they automatically pop out the other end as bundled
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index ede639812f8a..623a0fc0dae1 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -262,9 +262,10 @@ struct pcmcia_socket {
262 u8 present:1, /* PCMCIA card is present in socket */ 262 u8 present:1, /* PCMCIA card is present in socket */
263 busy:1, /* "master" ioctl is used */ 263 busy:1, /* "master" ioctl is used */
264 dead:1, /* pcmcia module is being unloaded */ 264 dead:1, /* pcmcia module is being unloaded */
265 device_add_pending:1, /* a pseudo-multifunction-device 265 device_add_pending:1, /* a multifunction-device
266 * add event is pending */ 266 * add event is pending */
267 reserved:4; 267 mfc_pfc:1, /* the pending event adds a mfc (1) or pfc (0) */
268 reserved:3;
268 } pcmcia_state; 269 } pcmcia_state;
269 270
270 struct work_struct device_add; /* for adding further pseudo-multifunction 271 struct work_struct device_add; /* for adding further pseudo-multifunction
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 1d77b63c5ea4..9233ed5de664 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -201,9 +201,14 @@ struct domain_device {
201 void *lldd_dev; 201 void *lldd_dev;
202}; 202};
203 203
204struct sas_discovery_event {
205 struct work_struct work;
206 struct asd_sas_port *port;
207};
208
204struct sas_discovery { 209struct sas_discovery {
205 spinlock_t disc_event_lock; 210 spinlock_t disc_event_lock;
206 struct work_struct disc_work[DISC_NUM_EVENTS]; 211 struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
207 unsigned long pending; 212 unsigned long pending;
208 u8 fanout_sas_addr[8]; 213 u8 fanout_sas_addr[8];
209 u8 eeds_a[8]; 214 u8 eeds_a[8];
@@ -249,14 +254,19 @@ struct asd_sas_port {
249 void *lldd_port; /* not touched by the sas class code */ 254 void *lldd_port; /* not touched by the sas class code */
250}; 255};
251 256
257struct asd_sas_event {
258 struct work_struct work;
259 struct asd_sas_phy *phy;
260};
261
252/* The phy pretty much is controlled by the LLDD. 262/* The phy pretty much is controlled by the LLDD.
253 * The class only reads those fields. 263 * The class only reads those fields.
254 */ 264 */
255struct asd_sas_phy { 265struct asd_sas_phy {
256/* private: */ 266/* private: */
257 /* protected by ha->event_lock */ 267 /* protected by ha->event_lock */
258 struct work_struct port_events[PORT_NUM_EVENTS]; 268 struct asd_sas_event port_events[PORT_NUM_EVENTS];
259 struct work_struct phy_events[PHY_NUM_EVENTS]; 269 struct asd_sas_event phy_events[PHY_NUM_EVENTS];
260 270
261 unsigned long port_events_pending; 271 unsigned long port_events_pending;
262 unsigned long phy_events_pending; 272 unsigned long phy_events_pending;
@@ -308,10 +318,15 @@ struct scsi_core {
308 int queue_thread_kill; 318 int queue_thread_kill;
309}; 319};
310 320
321struct sas_ha_event {
322 struct work_struct work;
323 struct sas_ha_struct *ha;
324};
325
311struct sas_ha_struct { 326struct sas_ha_struct {
312/* private: */ 327/* private: */
313 spinlock_t event_lock; 328 spinlock_t event_lock;
314 struct work_struct ha_events[HA_NUM_EVENTS]; 329 struct sas_ha_event ha_events[HA_NUM_EVENTS];
315 unsigned long pending; 330 unsigned long pending;
316 331
317 struct scsi_core core; 332 struct scsi_core core;
@@ -339,6 +354,8 @@ struct sas_ha_struct {
339 void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event); 354 void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
340 355
341 void *lldd_ha; /* not touched by sas class code */ 356 void *lldd_ha; /* not touched by sas class code */
357
358 struct list_head eh_done_q;
342}; 359};
343 360
344#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata) 361#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -527,13 +544,16 @@ struct sas_task {
527 544
528 void *lldd_task; /* for use by LLDDs */ 545 void *lldd_task; /* for use by LLDDs */
529 void *uldd_task; 546 void *uldd_task;
547
548 struct work_struct abort_work;
530}; 549};
531 550
532 551
533 552
534#define SAS_TASK_STATE_PENDING 1 553#define SAS_TASK_STATE_PENDING 1
535#define SAS_TASK_STATE_DONE 2 554#define SAS_TASK_STATE_DONE 2
536#define SAS_TASK_STATE_ABORTED 4 555#define SAS_TASK_STATE_ABORTED 4
556#define SAS_TASK_INITIATOR_ABORTED 8
537 557
538static inline struct sas_task *sas_alloc_task(gfp_t flags) 558static inline struct sas_task *sas_alloc_task(gfp_t flags)
539{ 559{
@@ -593,6 +613,7 @@ struct sas_domain_function_template {
593extern int sas_register_ha(struct sas_ha_struct *); 613extern int sas_register_ha(struct sas_ha_struct *);
594extern int sas_unregister_ha(struct sas_ha_struct *); 614extern int sas_unregister_ha(struct sas_ha_struct *);
595 615
616int sas_phy_reset(struct sas_phy *phy, int hard_reset);
596extern int sas_queuecommand(struct scsi_cmnd *, 617extern int sas_queuecommand(struct scsi_cmnd *,
597 void (*scsi_done)(struct scsi_cmnd *)); 618 void (*scsi_done)(struct scsi_cmnd *));
598extern int sas_target_alloc(struct scsi_target *); 619extern int sas_target_alloc(struct scsi_target *);
@@ -625,4 +646,6 @@ void sas_unregister_dev(struct domain_device *);
625 646
626void sas_init_dev(struct domain_device *); 647void sas_init_dev(struct domain_device *);
627 648
649void sas_task_abort(struct work_struct *);
650
628#endif /* _SASLIB_H_ */ 651#endif /* _SASLIB_H_ */
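
libsas shows the remaining pattern: arrays of work items that shared one table of handlers. Since the handler can no longer be told which parent the item belongs to, each array element becomes a small wrapper carrying the work_struct plus a back pointer, and container_of() recovers the wrapper. Sketched with invented names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

#define MY_NUM_EVENTS 4

struct my_ha;				/* parent object, details omitted */

struct my_ha_event {
	struct work_struct work;
	struct my_ha *ha;		/* back pointer to the parent */
};

struct my_ha {
	struct my_ha_event events[MY_NUM_EVENTS];
};

static void my_ha_event_handler(struct work_struct *work)
{
	struct my_ha_event *ev =
		container_of(work, struct my_ha_event, work);

	printk(KERN_DEBUG "event for ha %p\n", ev->ha);
}

static void my_ha_init(struct my_ha *ha)
{
	int i;

	for (i = 0; i < MY_NUM_EVENTS; i++) {
		INIT_WORK(&ha->events[i].work, my_ha_event_handler);
		ha->events[i].ha = ha;
	}
}
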
diff --git a/include/scsi/libsrp.h b/include/scsi/libsrp.h
new file mode 100644
index 000000000000..d143171896ae
--- /dev/null
+++ b/include/scsi/libsrp.h
@@ -0,0 +1,77 @@
1#ifndef __LIBSRP_H__
2#define __LIBSRP_H__
3
4#include <linux/list.h>
5#include <scsi/scsi_cmnd.h>
6#include <scsi/scsi_host.h>
7#include <scsi/srp.h>
8
9enum iue_flags {
10 V_DIOVER,
11 V_WRITE,
12 V_LINKED,
13 V_FLYING,
14};
15
16struct srp_buf {
17 dma_addr_t dma;
18 void *buf;
19};
20
21struct srp_queue {
22 void *pool;
23 void *items;
24 struct kfifo *queue;
25 spinlock_t lock;
26};
27
28struct srp_target {
29 struct Scsi_Host *shost;
30 struct device *dev;
31
32 spinlock_t lock;
33 struct list_head cmd_queue;
34
35 size_t srp_iu_size;
36 struct srp_queue iu_queue;
37 size_t rx_ring_size;
38 struct srp_buf **rx_ring;
39
40 void *ldata;
41};
42
43struct iu_entry {
44 struct srp_target *target;
45
46 struct list_head ilist;
47 dma_addr_t remote_token;
48 unsigned long flags;
49
50 struct srp_buf *sbuf;
51};
52
53typedef int (srp_rdma_t)(struct scsi_cmnd *, struct scatterlist *, int,
54 struct srp_direct_buf *, int,
55 enum dma_data_direction, unsigned int);
56extern int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
57extern void srp_target_free(struct srp_target *);
58
59extern struct iu_entry *srp_iu_get(struct srp_target *);
60extern void srp_iu_put(struct iu_entry *);
61
62extern int srp_cmd_queue(struct Scsi_Host *, struct srp_cmd *, void *, u64);
63extern int srp_transfer_data(struct scsi_cmnd *, struct srp_cmd *,
64 srp_rdma_t, int, int);
65
66
67static inline struct srp_target *host_to_srp_target(struct Scsi_Host *host)
68{
69 return (struct srp_target *) host->hostdata;
70}
71
72static inline int srp_cmd_direction(struct srp_cmd *cmd)
73{
74 return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
75}
76
77#endif
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index be117f812deb..d6948d0e8cdb 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -8,6 +8,7 @@
8 8
9struct request; 9struct request;
10struct scatterlist; 10struct scatterlist;
11struct Scsi_Host;
11struct scsi_device; 12struct scsi_device;
12 13
13 14
@@ -72,6 +73,9 @@ struct scsi_cmnd {
72 unsigned short use_sg; /* Number of pieces of scatter-gather */ 73 unsigned short use_sg; /* Number of pieces of scatter-gather */
73 unsigned short sglist_len; /* size of malloc'd scatter-gather list */ 74 unsigned short sglist_len; /* size of malloc'd scatter-gather list */
74 75
76 /* offset in cmd we are at (for multi-transfer tgt cmds) */
77 unsigned offset;
78
75 unsigned underflow; /* Return error if less than 79 unsigned underflow; /* Return error if less than
76 this amount is transferred */ 80 this amount is transferred */
77 81
@@ -119,7 +123,10 @@ struct scsi_cmnd {
119}; 123};
120 124
121extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t); 125extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
126extern struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *, gfp_t);
122extern void scsi_put_command(struct scsi_cmnd *); 127extern void scsi_put_command(struct scsi_cmnd *);
128extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
129 struct device *);
123extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); 130extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
124extern void scsi_finish_command(struct scsi_cmnd *cmd); 131extern void scsi_finish_command(struct scsi_cmnd *cmd);
125extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd); 132extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
@@ -128,4 +135,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
128 size_t *offset, size_t *len); 135 size_t *offset, size_t *len);
129extern void scsi_kunmap_atomic_sg(void *virt); 136extern void scsi_kunmap_atomic_sg(void *virt);
130 137
138extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
139extern void scsi_free_sgtable(struct scatterlist *, int);
140
131#endif /* _SCSI_SCSI_CMND_H */ 141#endif /* _SCSI_SCSI_CMND_H */
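A hedged sketch of how the newly exported scatter-gather helpers pair up, mirroring the way the midlayer itself uses them; the function name and GFP flag are illustrative, and cmd->use_sg is assumed to have been set to the number of entries needed.

/* Minimal sketch, not from the patch. */
static int example_setup_sglist(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;

	sg = scsi_alloc_sgtable(cmd, GFP_KERNEL);	/* sized from cmd->use_sg */
	if (!sg)
		return -ENOMEM;
	cmd->request_buffer = sg;

	/* ... fill the entries and run the transfer ... */

	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	return 0;
}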
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b401c82036be..ebf31b16dc49 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -223,13 +223,13 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
223 struct scsi_device *); 223 struct scsi_device *);
224 224
225/** 225/**
226 * shost_for_each_device - iterate over all devices of a host 226 * shost_for_each_device - iterate over all devices of a host
227 * @sdev: iterator 227 * @sdev: the &struct scsi_device to use as a cursor
228 * @host: host whiches devices we want to iterate over 228 * @shost: the &struct scsi_host to iterate over
229 * 229 *
230 * This traverses over each devices of @shost. The devices have 230 * Iterator that returns each device attached to @shost. This loop
231 * a reference that must be released by scsi_host_put when breaking 231 * takes a reference on each device and releases it at the end. If
232 * out of the loop. 232 * you break out of the loop, you must call scsi_device_put(sdev).
233 */ 233 */
234#define shost_for_each_device(sdev, shost) \ 234#define shost_for_each_device(sdev, shost) \
235 for ((sdev) = __scsi_iterate_devices((shost), NULL); \ 235 for ((sdev) = __scsi_iterate_devices((shost), NULL); \
@@ -237,17 +237,17 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
237 (sdev) = __scsi_iterate_devices((shost), (sdev))) 237 (sdev) = __scsi_iterate_devices((shost), (sdev)))
238 238
239/** 239/**
240 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED) 240 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
241 * @sdev: iterator 241 * @sdev: the &struct scsi_device to use as a cursor
242 * @host: host whiches devices we want to iterate over 242 * @shost: the &struct scsi_host to iterate over
243 * 243 *
244 * This traverses over each devices of @shost. It does _not_ take a 244 * Iterator that returns each device attached to @shost. It does _not_
245 * reference on the scsi_device, thus it the whole loop must be protected 245 * take a reference on the scsi_device, so the whole loop must be
246 * by shost->host_lock. 246 * protected by shost->host_lock.
247 * 247 *
248 * Note: The only reason why drivers would want to use this is because 248 * Note: The only reason to use this is because you need to access the
249 * they're need to access the device list in irq context. Otherwise you 249 * device list in interrupt context. Otherwise you really want to use
250 * really want to use shost_for_each_device instead. 250 * shost_for_each_device instead.
251 */ 251 */
252#define __shost_for_each_device(sdev, shost) \ 252#define __shost_for_each_device(sdev, shost) \
253 list_for_each_entry((sdev), &((shost)->__devices), siblings) 253 list_for_each_entry((sdev), &((shost)->__devices), siblings)
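The rewritten kernel-doc above pins down the reference-counting rule for shost_for_each_device(); a minimal sketch of the pattern it describes follows (the helper name and the LUN-0 condition are illustrative only).

/* Minimal sketch, not from the patch: returns the first LUN-0 device on
 * the host while deliberately keeping the reference the iterator took;
 * the caller owns the matching scsi_device_put(). */
static struct scsi_device *example_find_lun0(struct Scsi_Host *shost)
{
	struct scsi_device *sdev, *found = NULL;

	shost_for_each_device(sdev, shost) {
		if (sdev->lun == 0) {
			found = sdev;
			break;		/* reference intentionally kept */
		}
	}
	return found;
}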
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 39c6f8cc20c3..7f1f411d07af 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -7,6 +7,7 @@
7#include <linux/workqueue.h> 7#include <linux/workqueue.h>
8#include <linux/mutex.h> 8#include <linux/mutex.h>
9 9
10struct request_queue;
10struct block_device; 11struct block_device;
11struct completion; 12struct completion;
12struct module; 13struct module;
@@ -124,6 +125,39 @@ struct scsi_host_template {
124 void (*done)(struct scsi_cmnd *)); 125 void (*done)(struct scsi_cmnd *));
125 126
126 /* 127 /*
128 * The transfer functions are used to queue a scsi command to
129 * the LLD. When the driver is finished processing the command
130 * the done callback is invoked.
131 *
132 * return values: see queuecommand
133 *
134 * If the LLD accepts the cmd, it should set the result to an
 135 * appropriate value once the command completes, before calling the done function.
136 *
137 * STATUS: REQUIRED FOR TARGET DRIVERS
138 */
139 /* TODO: rename */
140 int (* transfer_response)(struct scsi_cmnd *,
141 void (*done)(struct scsi_cmnd *));
142 /*
143 * This is called to inform the LLD to transfer cmd->request_bufflen
144 * bytes of the cmd at cmd->offset in the cmd. The cmd->use_sg
 145 * specifies the number of scatterlist entries in the command
146 * and cmd->request_buffer contains the scatterlist.
147 *
148 * If the command cannot be processed in one transfer_data call
 149 * because a scatterlist within the LLD's limits cannot be
150 * created then transfer_data will be called multiple times.
151 * It is initially called from process context, and later
 152 * calls are from interrupt context.
153 */
154 int (* transfer_data)(struct scsi_cmnd *,
155 void (*done)(struct scsi_cmnd *));
156
157 /* Used as callback for the completion of task management request. */
158 int (* tsk_mgmt_response)(u64 mid, int result);
159
160 /*
127 * This is an error handling strategy routine. You don't need to 161 * This is an error handling strategy routine. You don't need to
128 * define one of these if you don't want to - there is a default 162 * define one of these if you don't want to - there is a default
129 * routine that is present that should work in most cases. For those 163 * routine that is present that should work in most cases. For those
@@ -241,6 +275,24 @@ struct scsi_host_template {
241 void (* target_destroy)(struct scsi_target *); 275 void (* target_destroy)(struct scsi_target *);
242 276
243 /* 277 /*
278 * If a host has the ability to discover targets on its own instead
279 * of scanning the entire bus, it can fill in this function and
 280 * call scsi_scan_host(). This function will then be called
 281 * periodically, with the scsi_host and the elapsed scan time in
 282 * jiffies, until it returns 1.
283 *
284 * Status: OPTIONAL
285 */
286 int (* scan_finished)(struct Scsi_Host *, unsigned long);
287
288 /*
289 * If the host wants to be called before the scan starts, but
 290 * after the midlayer has been set up ready for the scan, it can fill
291 * in this function.
292 */
293 void (* scan_start)(struct Scsi_Host *);
294
295 /*
244 * fill in this function to allow the queue depth of this host 296 * fill in this function to allow the queue depth of this host
245 * to be changeable (on a per device basis). returns either 297 * to be changeable (on a per device basis). returns either
246 * the current queue depth setting (may be different from what 298 * the current queue depth setting (may be different from what
@@ -552,6 +604,9 @@ struct Scsi_Host {
552 /* task mgmt function in progress */ 604 /* task mgmt function in progress */
553 unsigned tmf_in_progress:1; 605 unsigned tmf_in_progress:1;
554 606
607 /* Asynchronous scan in progress */
608 unsigned async_scan:1;
609
555 /* 610 /*
556 * Optional work queue to be utilized by the transport 611 * Optional work queue to be utilized by the transport
557 */ 612 */
@@ -568,6 +623,12 @@ struct Scsi_Host {
568 */ 623 */
569 unsigned int max_host_blocked; 624 unsigned int max_host_blocked;
570 625
626 /*
627 * q used for scsi_tgt msgs, async events or any other requests that
628 * need to be processed in userspace
629 */
630 struct request_queue *uspace_req_q;
631
571 /* legacy crap */ 632 /* legacy crap */
572 unsigned long base; 633 unsigned long base;
573 unsigned long io_port; 634 unsigned long io_port;
@@ -648,11 +709,6 @@ extern const char *scsi_host_state_name(enum scsi_host_state);
648 709
649extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); 710extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
650 711
651static inline void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
652{
653 shost->host_lock = lock;
654}
655
656static inline struct device *scsi_get_device(struct Scsi_Host *shost) 712static inline struct device *scsi_get_device(struct Scsi_Host *shost)
657{ 713{
658 return shost->shost_gendev.parent; 714 return shost->shost_gendev.parent;
@@ -671,6 +727,9 @@ extern void scsi_unblock_requests(struct Scsi_Host *);
671extern void scsi_block_requests(struct Scsi_Host *); 727extern void scsi_block_requests(struct Scsi_Host *);
672 728
673struct class_container; 729struct class_container;
730
731extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
732 void (*) (struct request_queue *));
674/* 733/*
675 * These two functions are used to allocate and free a pseudo device 734 * These two functions are used to allocate and free a pseudo device
676 * which will connect to the host adapter itself rather than any 735 * which will connect to the host adapter itself rather than any
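To make the new asynchronous-scan hooks concrete, a hedged sketch of how a low-level driver might fill them in; struct example_hba, the hostdata layout and the ten-second cut-off are assumptions, not part of this patch.

struct example_hba {			/* hypothetical per-host driver data */
	int scan_done;
};

static void example_scan_start(struct Scsi_Host *shost)
{
	struct example_hba *hba = (struct example_hba *)shost->hostdata;

	hba->scan_done = 0;
	/* kick off the hardware's own target discovery here */
}

static int example_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
{
	struct example_hba *hba = (struct example_hba *)shost->hostdata;

	/* report completion, or give up after ten seconds of scanning */
	return hba->scan_done || elapsed > 10 * HZ;
}

These would be wired into the driver's scsi_host_template as .scan_start and .scan_finished before it calls scsi_scan_host().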
diff --git a/include/scsi/scsi_tgt.h b/include/scsi/scsi_tgt.h
new file mode 100644
index 000000000000..4f4427937af2
--- /dev/null
+++ b/include/scsi/scsi_tgt.h
@@ -0,0 +1,19 @@
1/*
2 * SCSI target definitions
3 */
4
5#include <linux/dma-mapping.h>
6
7struct Scsi_Host;
8struct scsi_cmnd;
9struct scsi_lun;
10
11extern struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *);
12extern int scsi_tgt_alloc_queue(struct Scsi_Host *);
13extern void scsi_tgt_free_queue(struct Scsi_Host *);
14extern int scsi_tgt_queue_command(struct scsi_cmnd *, struct scsi_lun *, u64);
15extern int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *, int, u64, struct scsi_lun *,
16 void *);
17extern struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *,
18 enum dma_data_direction, gfp_t);
19extern void scsi_host_put_command(struct Scsi_Host *, struct scsi_cmnd *);
diff --git a/include/scsi/scsi_tgt_if.h b/include/scsi/scsi_tgt_if.h
new file mode 100644
index 000000000000..46d5e70d7215
--- /dev/null
+++ b/include/scsi/scsi_tgt_if.h
@@ -0,0 +1,90 @@
1/*
2 * SCSI target kernel/user interface
3 *
4 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
5 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22#ifndef __SCSI_TARGET_IF_H
23#define __SCSI_TARGET_IF_H
24
25/* user -> kernel */
26#define TGT_UEVENT_CMD_RSP 0x0001
27#define TGT_UEVENT_TSK_MGMT_RSP 0x0002
28
29/* kernel -> user */
30#define TGT_KEVENT_CMD_REQ 0x1001
31#define TGT_KEVENT_CMD_DONE 0x1002
32#define TGT_KEVENT_TSK_MGMT_REQ 0x1003
33
34struct tgt_event_hdr {
35 uint16_t version;
36 uint16_t status;
37 uint16_t type;
38 uint16_t len;
39} __attribute__ ((aligned (sizeof(uint64_t))));
40
41struct tgt_event {
42 struct tgt_event_hdr hdr;
43
44 union {
45 /* user-> kernel */
46 struct {
47 int host_no;
48 uint32_t len;
49 int result;
50 aligned_u64 uaddr;
51 uint8_t rw;
52 aligned_u64 tag;
53 } cmd_rsp;
54 struct {
55 int host_no;
56 aligned_u64 mid;
57 int result;
58 } tsk_mgmt_rsp;
59
60
61 /* kernel -> user */
62 struct {
63 int host_no;
64 uint32_t data_len;
65 uint8_t scb[16];
66 uint8_t lun[8];
67 int attribute;
68 aligned_u64 tag;
69 } cmd_req;
70 struct {
71 int host_no;
72 aligned_u64 tag;
73 int result;
74 } cmd_done;
75 struct {
76 int host_no;
77 int function;
78 aligned_u64 tag;
79 uint8_t lun[8];
80 aligned_u64 mid;
81 } tsk_mgmt_req;
82 } p;
83} __attribute__ ((aligned (sizeof(uint64_t))));
84
85#define TGT_RING_SIZE (1UL << 16)
86#define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT)
87#define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event))
88#define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES)
89
90#endif
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index fd352323378b..798f7c7ee426 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -206,9 +206,9 @@ struct fc_rport { /* aka fc_starget_attrs */
206 u8 flags; 206 u8 flags;
207 struct list_head peers; 207 struct list_head peers;
208 struct device dev; 208 struct device dev;
209 struct work_struct dev_loss_work; 209 struct delayed_work dev_loss_work;
210 struct work_struct scan_work; 210 struct work_struct scan_work;
211 struct work_struct fail_io_work; 211 struct delayed_work fail_io_work;
212 struct work_struct stgt_delete_work; 212 struct work_struct stgt_delete_work;
213 struct work_struct rport_delete_work; 213 struct work_struct rport_delete_work;
214} __attribute__((aligned(sizeof(unsigned long)))); 214} __attribute__((aligned(sizeof(unsigned long))));
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 4b95c89c95c9..d5c218ddc527 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -176,7 +176,7 @@ struct iscsi_cls_session {
176 176
177 /* recovery fields */ 177 /* recovery fields */
178 int recovery_tmo; 178 int recovery_tmo;
179 struct work_struct recovery_work; 179 struct delayed_work recovery_work;
180 180
181 int target_id; 181 int target_id;
182 182
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 53024377f3b8..59633a82de47 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -73,6 +73,8 @@ struct sas_phy {
73 73
74 /* for the list of phys belonging to a port */ 74 /* for the list of phys belonging to a port */
75 struct list_head port_siblings; 75 struct list_head port_siblings;
76
77 struct work_struct reset_work;
76}; 78};
77 79
78#define dev_to_phy(d) \ 80#define dev_to_phy(d) \
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 4c43521cc493..33720397a904 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -511,7 +511,7 @@ struct snd_ac97 {
511#ifdef CONFIG_SND_AC97_POWER_SAVE 511#ifdef CONFIG_SND_AC97_POWER_SAVE
512 unsigned int power_up; /* power states */ 512 unsigned int power_up; /* power states */
513 struct workqueue_struct *power_workq; 513 struct workqueue_struct *power_workq;
514 struct work_struct power_work; 514 struct delayed_work power_work;
515#endif 515#endif
516 struct device dev; 516 struct device dev;
517}; 517};
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 11702aa0bea9..2ee061625fd0 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -182,7 +182,7 @@ struct ak4114 {
182 unsigned char rcs0; 182 unsigned char rcs0;
183 unsigned char rcs1; 183 unsigned char rcs1;
184 struct workqueue_struct *workqueue; 184 struct workqueue_struct *workqueue;
185 struct work_struct work; 185 struct delayed_work work;
186 void *change_callback_private; 186 void *change_callback_private;
187 void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1); 187 void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1);
188}; 188};
diff --git a/ipc/util.c b/ipc/util.c
index cd8bb14a431f..a9b7a227b8d4 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -514,6 +514,11 @@ void ipc_rcu_getref(void *ptr)
514 container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; 514 container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
515} 515}
516 516
517static void ipc_do_vfree(struct work_struct *work)
518{
519 vfree(container_of(work, struct ipc_rcu_sched, work));
520}
521
517/** 522/**
518 * ipc_schedule_free - free ipc + rcu space 523 * ipc_schedule_free - free ipc + rcu space
519 * @head: RCU callback structure for queued work 524 * @head: RCU callback structure for queued work
@@ -528,7 +533,7 @@ static void ipc_schedule_free(struct rcu_head *head)
528 struct ipc_rcu_sched *sched = 533 struct ipc_rcu_sched *sched =
529 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); 534 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
530 535
531 INIT_WORK(&sched->work, vfree, sched); 536 INIT_WORK(&sched->work, ipc_do_vfree);
532 schedule_work(&sched->work); 537 schedule_work(&sched->work);
533} 538}
534 539
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2b76dee28496..8d2bea09a4ec 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module);
114#endif /* CONFIG_KMOD */ 114#endif /* CONFIG_KMOD */
115 115
116struct subprocess_info { 116struct subprocess_info {
117 struct work_struct work;
117 struct completion *complete; 118 struct completion *complete;
118 char *path; 119 char *path;
119 char **argv; 120 char **argv;
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data)
221} 222}
222 223
223/* This is run by khelper thread */ 224/* This is run by khelper thread */
224static void __call_usermodehelper(void *data) 225static void __call_usermodehelper(struct work_struct *work)
225{ 226{
226 struct subprocess_info *sub_info = data; 227 struct subprocess_info *sub_info =
228 container_of(work, struct subprocess_info, work);
227 pid_t pid; 229 pid_t pid;
228 int wait = sub_info->wait; 230 int wait = sub_info->wait;
229 231
@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
264{ 266{
265 DECLARE_COMPLETION_ONSTACK(done); 267 DECLARE_COMPLETION_ONSTACK(done);
266 struct subprocess_info sub_info = { 268 struct subprocess_info sub_info = {
269 .work = __WORK_INITIALIZER(sub_info.work,
270 __call_usermodehelper),
267 .complete = &done, 271 .complete = &done,
268 .path = path, 272 .path = path,
269 .argv = argv, 273 .argv = argv,
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
272 .wait = wait, 276 .wait = wait,
273 .retval = 0, 277 .retval = 0,
274 }; 278 };
275 DECLARE_WORK(work, __call_usermodehelper, &sub_info);
276 279
277 if (!khelper_wq) 280 if (!khelper_wq)
278 return -EBUSY; 281 return -EBUSY;
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
280 if (path[0] == '\0') 283 if (path[0] == '\0')
281 return 0; 284 return 0;
282 285
283 queue_work(khelper_wq, &work); 286 queue_work(khelper_wq, &sub_info.work);
284 wait_for_completion(&done); 287 wait_for_completion(&done);
285 return sub_info.retval; 288 return sub_info.retval;
286} 289}
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
291{ 294{
292 DECLARE_COMPLETION(done); 295 DECLARE_COMPLETION(done);
293 struct subprocess_info sub_info = { 296 struct subprocess_info sub_info = {
297 .work = __WORK_INITIALIZER(sub_info.work,
298 __call_usermodehelper),
294 .complete = &done, 299 .complete = &done,
295 .path = path, 300 .path = path,
296 .argv = argv, 301 .argv = argv,
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
298 .retval = 0, 303 .retval = 0,
299 }; 304 };
300 struct file *f; 305 struct file *f;
301 DECLARE_WORK(work, __call_usermodehelper, &sub_info);
302 306
303 if (!khelper_wq) 307 if (!khelper_wq)
304 return -EBUSY; 308 return -EBUSY;
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
318 } 322 }
319 sub_info.stdin = f; 323 sub_info.stdin = f;
320 324
321 queue_work(khelper_wq, &work); 325 queue_work(khelper_wq, &sub_info.work);
322 wait_for_completion(&done); 326 wait_for_completion(&done);
323 return sub_info.retval; 327 return sub_info.retval;
324} 328}
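The kmod.c conversion above is one instance of the convention this series moves to: embed the work_struct in the object it serves, hand only the handler to the initializer, and recover the object with container_of() inside the handler. A stand-alone sketch of that pattern, with all names hypothetical:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_req {
	struct work_struct work;
	int payload;			/* hypothetical driver state */
};

static void example_handler(struct work_struct *work)
{
	struct example_req *req = container_of(work, struct example_req, work);

	printk(KERN_DEBUG "handling payload %d\n", req->payload);
}

static void example_submit(struct example_req *req)
{
	INIT_WORK(&req->work, example_handler);	/* handler only, no data pointer */
	schedule_work(&req->work);
}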
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60ef95e8..1db8c72d0d38 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@ struct kthread_create_info
31 /* Result passed back to kthread_create() from keventd. */ 31 /* Result passed back to kthread_create() from keventd. */
32 struct task_struct *result; 32 struct task_struct *result;
33 struct completion done; 33 struct completion done;
34
35 struct work_struct work;
34}; 36};
35 37
36struct kthread_stop_info 38struct kthread_stop_info
@@ -111,9 +113,10 @@ static int kthread(void *_create)
111} 113}
112 114
113/* We are keventd: create a thread. */ 115/* We are keventd: create a thread. */
114static void keventd_create_kthread(void *_create) 116static void keventd_create_kthread(struct work_struct *work)
115{ 117{
116 struct kthread_create_info *create = _create; 118 struct kthread_create_info *create =
119 container_of(work, struct kthread_create_info, work);
117 int pid; 120 int pid;
118 121
119 /* We want our own signal handler (we take no signals by default). */ 122 /* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
154 ...) 157 ...)
155{ 158{
156 struct kthread_create_info create; 159 struct kthread_create_info create;
157 DECLARE_WORK(work, keventd_create_kthread, &create);
158 160
159 create.threadfn = threadfn; 161 create.threadfn = threadfn;
160 create.data = data; 162 create.data = data;
161 init_completion(&create.started); 163 init_completion(&create.started);
162 init_completion(&create.done); 164 init_completion(&create.done);
165 INIT_WORK(&create.work, keventd_create_kthread);
163 166
164 /* 167 /*
165 * The workqueue needs to start up first: 168 * The workqueue needs to start up first:
166 */ 169 */
167 if (!helper_wq) 170 if (!helper_wq)
168 work.func(work.data); 171 create.work.func(&create.work);
169 else { 172 else {
170 queue_work(helper_wq, &work); 173 queue_work(helper_wq, &create.work);
171 wait_for_completion(&create.done); 174 wait_for_completion(&create.done);
172 } 175 }
173 if (!IS_ERR(create.result)) { 176 if (!IS_ERR(create.result)) {
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900ac3164..678ec736076b 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
16 * callback we use. 16 * callback we use.
17 */ 17 */
18 18
19static void do_poweroff(void *dummy) 19static void do_poweroff(struct work_struct *dummy)
20{ 20{
21 kernel_power_off(); 21 kernel_power_off();
22} 22}
23 23
24static DECLARE_WORK(poweroff_work, do_poweroff, NULL); 24static DECLARE_WORK(poweroff_work, do_poweroff);
25 25
26static void handle_poweroff(int key, struct tty_struct *tty) 26static void handle_poweroff(int key, struct tty_struct *tty)
27{ 27{
diff --git a/kernel/relay.c b/kernel/relay.c
index f04bbdb56ac2..2b92e8ece85b 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -308,9 +308,10 @@ static struct rchan_callbacks default_channel_callbacks = {
308 * reason waking is deferred is that calling directly from write 308 * reason waking is deferred is that calling directly from write
309 * causes problems if you're writing from say the scheduler. 309 * causes problems if you're writing from say the scheduler.
310 */ 310 */
311static void wakeup_readers(void *private) 311static void wakeup_readers(struct work_struct *work)
312{ 312{
313 struct rchan_buf *buf = private; 313 struct rchan_buf *buf =
314 container_of(work, struct rchan_buf, wake_readers.work);
314 wake_up_interruptible(&buf->read_wait); 315 wake_up_interruptible(&buf->read_wait);
315} 316}
316 317
@@ -328,7 +329,7 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
328 if (init) { 329 if (init) {
329 init_waitqueue_head(&buf->read_wait); 330 init_waitqueue_head(&buf->read_wait);
330 kref_init(&buf->kref); 331 kref_init(&buf->kref);
331 INIT_WORK(&buf->wake_readers, NULL, NULL); 332 INIT_DELAYED_WORK(&buf->wake_readers, NULL);
332 } else { 333 } else {
333 cancel_delayed_work(&buf->wake_readers); 334 cancel_delayed_work(&buf->wake_readers);
334 flush_scheduled_work(); 335 flush_scheduled_work();
@@ -549,7 +550,8 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
549 buf->padding[old_subbuf]; 550 buf->padding[old_subbuf];
550 smp_mb(); 551 smp_mb();
551 if (waitqueue_active(&buf->read_wait)) { 552 if (waitqueue_active(&buf->read_wait)) {
552 PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf); 553 PREPARE_DELAYED_WORK(&buf->wake_readers,
554 wakeup_readers);
553 schedule_delayed_work(&buf->wake_readers, 1); 555 schedule_delayed_work(&buf->wake_readers, 1);
554 } 556 }
555 } 557 }
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d82801b..c87b461de38d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
880 return 0; 880 return 0;
881} 881}
882 882
883static void deferred_cad(void *dummy) 883static void deferred_cad(struct work_struct *dummy)
884{ 884{
885 kernel_restart(NULL); 885 kernel_restart(NULL);
886} 886}
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy)
892 */ 892 */
893void ctrl_alt_del(void) 893void ctrl_alt_del(void)
894{ 894{
895 static DECLARE_WORK(cad_work, deferred_cad, NULL); 895 static DECLARE_WORK(cad_work, deferred_cad);
896 896
897 if (C_A_D) 897 if (C_A_D)
898 schedule_work(&cad_work); 898 schedule_work(&cad_work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d2c27..8d1e7cb8a51a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -80,6 +80,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
80 return list_empty(&wq->list); 80 return list_empty(&wq->list);
81} 81}
82 82
83static inline void set_wq_data(struct work_struct *work, void *wq)
84{
85 unsigned long new, old, res;
86
87 /* assume the pending flag is already set and that the task has already
88 * been queued on this workqueue */
89 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
90 res = work->management;
91 if (res != new) {
92 do {
93 old = res;
94 new = (unsigned long) wq;
95 new |= (old & WORK_STRUCT_FLAG_MASK);
96 res = cmpxchg(&work->management, old, new);
97 } while (res != old);
98 }
99}
100
101static inline void *get_wq_data(struct work_struct *work)
102{
103 return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
104}
105
83/* Preempt must be disabled. */ 106/* Preempt must be disabled. */
84static void __queue_work(struct cpu_workqueue_struct *cwq, 107static void __queue_work(struct cpu_workqueue_struct *cwq,
85 struct work_struct *work) 108 struct work_struct *work)
@@ -87,7 +110,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
87 unsigned long flags; 110 unsigned long flags;
88 111
89 spin_lock_irqsave(&cwq->lock, flags); 112 spin_lock_irqsave(&cwq->lock, flags);
90 work->wq_data = cwq; 113 set_wq_data(work, cwq);
91 list_add_tail(&work->entry, &cwq->worklist); 114 list_add_tail(&work->entry, &cwq->worklist);
92 cwq->insert_sequence++; 115 cwq->insert_sequence++;
93 wake_up(&cwq->more_work); 116 wake_up(&cwq->more_work);
@@ -108,7 +131,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
108{ 131{
109 int ret = 0, cpu = get_cpu(); 132 int ret = 0, cpu = get_cpu();
110 133
111 if (!test_and_set_bit(0, &work->pending)) { 134 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
112 if (unlikely(is_single_threaded(wq))) 135 if (unlikely(is_single_threaded(wq)))
113 cpu = singlethread_cpu; 136 cpu = singlethread_cpu;
114 BUG_ON(!list_empty(&work->entry)); 137 BUG_ON(!list_empty(&work->entry));
@@ -122,38 +145,42 @@ EXPORT_SYMBOL_GPL(queue_work);
122 145
123static void delayed_work_timer_fn(unsigned long __data) 146static void delayed_work_timer_fn(unsigned long __data)
124{ 147{
125 struct work_struct *work = (struct work_struct *)__data; 148 struct delayed_work *dwork = (struct delayed_work *)__data;
126 struct workqueue_struct *wq = work->wq_data; 149 struct workqueue_struct *wq = get_wq_data(&dwork->work);
127 int cpu = smp_processor_id(); 150 int cpu = smp_processor_id();
128 151
129 if (unlikely(is_single_threaded(wq))) 152 if (unlikely(is_single_threaded(wq)))
130 cpu = singlethread_cpu; 153 cpu = singlethread_cpu;
131 154
132 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 155 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
133} 156}
134 157
135/** 158/**
136 * queue_delayed_work - queue work on a workqueue after delay 159 * queue_delayed_work - queue work on a workqueue after delay
137 * @wq: workqueue to use 160 * @wq: workqueue to use
138 * @work: work to queue 161 * @work: delayable work to queue
139 * @delay: number of jiffies to wait before queueing 162 * @delay: number of jiffies to wait before queueing
140 * 163 *
141 * Returns 0 if @work was already on a queue, non-zero otherwise. 164 * Returns 0 if @work was already on a queue, non-zero otherwise.
142 */ 165 */
143int fastcall queue_delayed_work(struct workqueue_struct *wq, 166int fastcall queue_delayed_work(struct workqueue_struct *wq,
144 struct work_struct *work, unsigned long delay) 167 struct delayed_work *dwork, unsigned long delay)
145{ 168{
146 int ret = 0; 169 int ret = 0;
147 struct timer_list *timer = &work->timer; 170 struct timer_list *timer = &dwork->timer;
171 struct work_struct *work = &dwork->work;
172
173 if (delay == 0)
174 return queue_work(wq, work);
148 175
149 if (!test_and_set_bit(0, &work->pending)) { 176 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
150 BUG_ON(timer_pending(timer)); 177 BUG_ON(timer_pending(timer));
151 BUG_ON(!list_empty(&work->entry)); 178 BUG_ON(!list_empty(&work->entry));
152 179
153 /* This stores wq for the moment, for the timer_fn */ 180 /* This stores wq for the moment, for the timer_fn */
154 work->wq_data = wq; 181 set_wq_data(work, wq);
155 timer->expires = jiffies + delay; 182 timer->expires = jiffies + delay;
156 timer->data = (unsigned long)work; 183 timer->data = (unsigned long)dwork;
157 timer->function = delayed_work_timer_fn; 184 timer->function = delayed_work_timer_fn;
158 add_timer(timer); 185 add_timer(timer);
159 ret = 1; 186 ret = 1;
@@ -172,19 +199,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
172 * Returns 0 if @work was already on a queue, non-zero otherwise. 199 * Returns 0 if @work was already on a queue, non-zero otherwise.
173 */ 200 */
174int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 201int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
175 struct work_struct *work, unsigned long delay) 202 struct delayed_work *dwork, unsigned long delay)
176{ 203{
177 int ret = 0; 204 int ret = 0;
178 struct timer_list *timer = &work->timer; 205 struct timer_list *timer = &dwork->timer;
206 struct work_struct *work = &dwork->work;
179 207
180 if (!test_and_set_bit(0, &work->pending)) { 208 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
181 BUG_ON(timer_pending(timer)); 209 BUG_ON(timer_pending(timer));
182 BUG_ON(!list_empty(&work->entry)); 210 BUG_ON(!list_empty(&work->entry));
183 211
184 /* This stores wq for the moment, for the timer_fn */ 212 /* This stores wq for the moment, for the timer_fn */
185 work->wq_data = wq; 213 set_wq_data(work, wq);
186 timer->expires = jiffies + delay; 214 timer->expires = jiffies + delay;
187 timer->data = (unsigned long)work; 215 timer->data = (unsigned long)dwork;
188 timer->function = delayed_work_timer_fn; 216 timer->function = delayed_work_timer_fn;
189 add_timer_on(timer, cpu); 217 add_timer_on(timer, cpu);
190 ret = 1; 218 ret = 1;
@@ -212,15 +240,15 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
212 while (!list_empty(&cwq->worklist)) { 240 while (!list_empty(&cwq->worklist)) {
213 struct work_struct *work = list_entry(cwq->worklist.next, 241 struct work_struct *work = list_entry(cwq->worklist.next,
214 struct work_struct, entry); 242 struct work_struct, entry);
215 void (*f) (void *) = work->func; 243 work_func_t f = work->func;
216 void *data = work->data;
217 244
218 list_del_init(cwq->worklist.next); 245 list_del_init(cwq->worklist.next);
219 spin_unlock_irqrestore(&cwq->lock, flags); 246 spin_unlock_irqrestore(&cwq->lock, flags);
220 247
221 BUG_ON(work->wq_data != cwq); 248 BUG_ON(get_wq_data(work) != cwq);
222 clear_bit(0, &work->pending); 249 if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
223 f(data); 250 work_release(work);
251 f(work);
224 252
225 spin_lock_irqsave(&cwq->lock, flags); 253 spin_lock_irqsave(&cwq->lock, flags);
226 cwq->remove_sequence++; 254 cwq->remove_sequence++;
@@ -468,38 +496,37 @@ EXPORT_SYMBOL(schedule_work);
468 496
469/** 497/**
470 * schedule_delayed_work - put work task in global workqueue after delay 498 * schedule_delayed_work - put work task in global workqueue after delay
471 * @work: job to be done 499 * @dwork: job to be done
472 * @delay: number of jiffies to wait 500 * @delay: number of jiffies to wait or 0 for immediate execution
473 * 501 *
474 * After waiting for a given time this puts a job in the kernel-global 502 * After waiting for a given time this puts a job in the kernel-global
475 * workqueue. 503 * workqueue.
476 */ 504 */
477int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) 505int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
478{ 506{
479 return queue_delayed_work(keventd_wq, work, delay); 507 return queue_delayed_work(keventd_wq, dwork, delay);
480} 508}
481EXPORT_SYMBOL(schedule_delayed_work); 509EXPORT_SYMBOL(schedule_delayed_work);
482 510
483/** 511/**
484 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 512 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
485 * @cpu: cpu to use 513 * @cpu: cpu to use
486 * @work: job to be done 514 * @dwork: job to be done
487 * @delay: number of jiffies to wait 515 * @delay: number of jiffies to wait
488 * 516 *
489 * After waiting for a given time this puts a job in the kernel-global 517 * After waiting for a given time this puts a job in the kernel-global
490 * workqueue on the specified CPU. 518 * workqueue on the specified CPU.
491 */ 519 */
492int schedule_delayed_work_on(int cpu, 520int schedule_delayed_work_on(int cpu,
493 struct work_struct *work, unsigned long delay) 521 struct delayed_work *dwork, unsigned long delay)
494{ 522{
495 return queue_delayed_work_on(cpu, keventd_wq, work, delay); 523 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
496} 524}
497EXPORT_SYMBOL(schedule_delayed_work_on); 525EXPORT_SYMBOL(schedule_delayed_work_on);
498 526
499/** 527/**
500 * schedule_on_each_cpu - call a function on each online CPU from keventd 528 * schedule_on_each_cpu - call a function on each online CPU from keventd
501 * @func: the function to call 529 * @func: the function to call
502 * @info: a pointer to pass to func()
503 * 530 *
504 * Returns zero on success. 531 * Returns zero on success.
505 * Returns -ve errno on failure. 532 * Returns -ve errno on failure.
@@ -508,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
508 * 535 *
509 * schedule_on_each_cpu() is very slow. 536 * schedule_on_each_cpu() is very slow.
510 */ 537 */
511int schedule_on_each_cpu(void (*func)(void *info), void *info) 538int schedule_on_each_cpu(work_func_t func)
512{ 539{
513 int cpu; 540 int cpu;
514 struct work_struct *works; 541 struct work_struct *works;
@@ -519,7 +546,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
519 546
520 mutex_lock(&workqueue_mutex); 547 mutex_lock(&workqueue_mutex);
521 for_each_online_cpu(cpu) { 548 for_each_online_cpu(cpu) {
522 INIT_WORK(per_cpu_ptr(works, cpu), func, info); 549 INIT_WORK(per_cpu_ptr(works, cpu), func);
523 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), 550 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
524 per_cpu_ptr(works, cpu)); 551 per_cpu_ptr(works, cpu));
525 } 552 }
@@ -539,12 +566,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
539 * cancel_rearming_delayed_workqueue - reliably kill off a delayed 566 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
540 * work whose handler rearms the delayed work. 567 * work whose handler rearms the delayed work.
541 * @wq: the controlling workqueue structure 568 * @wq: the controlling workqueue structure
542 * @work: the delayed work struct 569 * @dwork: the delayed work struct
543 */ 570 */
544void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, 571void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
545 struct work_struct *work) 572 struct delayed_work *dwork)
546{ 573{
547 while (!cancel_delayed_work(work)) 574 while (!cancel_delayed_work(dwork))
548 flush_workqueue(wq); 575 flush_workqueue(wq);
549} 576}
550EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); 577EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,18 +579,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
552/** 579/**
553 * cancel_rearming_delayed_work - reliably kill off a delayed keventd 580 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
554 * work whose handler rearms the delayed work. 581 * work whose handler rearms the delayed work.
555 * @work: the delayed work struct 582 * @dwork: the delayed work struct
556 */ 583 */
557void cancel_rearming_delayed_work(struct work_struct *work) 584void cancel_rearming_delayed_work(struct delayed_work *dwork)
558{ 585{
559 cancel_rearming_delayed_workqueue(keventd_wq, work); 586 cancel_rearming_delayed_workqueue(keventd_wq, dwork);
560} 587}
561EXPORT_SYMBOL(cancel_rearming_delayed_work); 588EXPORT_SYMBOL(cancel_rearming_delayed_work);
562 589
563/** 590/**
564 * execute_in_process_context - reliably execute the routine with user context 591 * execute_in_process_context - reliably execute the routine with user context
565 * @fn: the function to execute 592 * @fn: the function to execute
566 * @data: data to pass to the function
567 * @ew: guaranteed storage for the execute work structure (must 593 * @ew: guaranteed storage for the execute work structure (must
568 * be available when the work executes) 594 * be available when the work executes)
569 * 595 *
@@ -573,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
573 * Returns: 0 - function was executed 599 * Returns: 0 - function was executed
574 * 1 - function was scheduled for execution 600 * 1 - function was scheduled for execution
575 */ 601 */
576int execute_in_process_context(void (*fn)(void *data), void *data, 602int execute_in_process_context(work_func_t fn, struct execute_work *ew)
577 struct execute_work *ew)
578{ 603{
579 if (!in_interrupt()) { 604 if (!in_interrupt()) {
580 fn(data); 605 fn(&ew->work);
581 return 0; 606 return 0;
582 } 607 }
583 608
584 INIT_WORK(&ew->work, fn, data); 609 INIT_WORK(&ew->work, fn);
585 schedule_work(&ew->work); 610 schedule_work(&ew->work);
586 611
 587 return 1; 612 return 1;
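For the delayed side of the reworked API, a hedged sketch of the pattern the conversions later in this patch (relay, lec, slab) follow: struct delayed_work embeds a work_struct, so the handler reaches its container through the '.work' member in container_of(); every name below is hypothetical.

struct example_poll {
	struct delayed_work dwork;
	unsigned long interval;		/* hypothetical, in jiffies */
};

static void example_poll_fn(struct work_struct *work)
{
	struct example_poll *p = container_of(work, struct example_poll,
					      dwork.work);

	/* ... do the periodic work, then rearm ... */
	schedule_delayed_work(&p->dwork, p->interval);
}

static void example_poll_start(struct example_poll *p)
{
	INIT_DELAYED_WORK(&p->dwork, example_poll_fn);
	schedule_delayed_work(&p->dwork, p->interval);
}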
diff --git a/mm/nommu.c b/mm/nommu.c
index 8bdde9508f3b..6a2a8aada401 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -497,15 +497,17 @@ static int validate_mmap_request(struct file *file,
497 (flags & MAP_TYPE) != MAP_SHARED) 497 (flags & MAP_TYPE) != MAP_SHARED)
498 return -EINVAL; 498 return -EINVAL;
499 499
500 if (PAGE_ALIGN(len) == 0) 500 if (!len)
501 return addr;
502
503 if (len > TASK_SIZE)
504 return -EINVAL; 501 return -EINVAL;
505 502
503 /* Careful about overflows.. */
504 len = PAGE_ALIGN(len);
505 if (!len || len > TASK_SIZE)
506 return -ENOMEM;
507
506 /* offset overflow? */ 508 /* offset overflow? */
507 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) 509 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
508 return -EINVAL; 510 return -EOVERFLOW;
509 511
510 if (file) { 512 if (file) {
511 /* validate file mapping requests */ 513 /* validate file mapping requests */
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e34eddc..5de81473df34 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
313static void free_block(struct kmem_cache *cachep, void **objpp, int len, 313static void free_block(struct kmem_cache *cachep, void **objpp, int len,
314 int node); 314 int node);
315static int enable_cpucache(struct kmem_cache *cachep); 315static int enable_cpucache(struct kmem_cache *cachep);
316static void cache_reap(void *unused); 316static void cache_reap(struct work_struct *unused);
317 317
318/* 318/*
319 * This function must be completely optimized away if a constant is passed to 319 * This function must be completely optimized away if a constant is passed to
@@ -753,7 +753,7 @@ int slab_is_available(void)
753 return g_cpucache_up == FULL; 753 return g_cpucache_up == FULL;
754} 754}
755 755
756static DEFINE_PER_CPU(struct work_struct, reap_work); 756static DEFINE_PER_CPU(struct delayed_work, reap_work);
757 757
758static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 758static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
759{ 759{
@@ -916,16 +916,16 @@ static void next_reap_node(void)
916 */ 916 */
917static void __devinit start_cpu_timer(int cpu) 917static void __devinit start_cpu_timer(int cpu)
918{ 918{
919 struct work_struct *reap_work = &per_cpu(reap_work, cpu); 919 struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
920 920
921 /* 921 /*
922 * When this gets called from do_initcalls via cpucache_init(), 922 * When this gets called from do_initcalls via cpucache_init(),
923 * init_workqueues() has already run, so keventd will be setup 923 * init_workqueues() has already run, so keventd will be setup
924 * at that time. 924 * at that time.
925 */ 925 */
926 if (keventd_up() && reap_work->func == NULL) { 926 if (keventd_up() && reap_work->work.func == NULL) {
927 init_reap_node(cpu); 927 init_reap_node(cpu);
928 INIT_WORK(reap_work, cache_reap, NULL); 928 INIT_DELAYED_WORK(reap_work, cache_reap);
929 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); 929 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
930 } 930 }
931} 931}
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3815 * If we cannot acquire the cache chain mutex then just give up - we'll try 3815 * If we cannot acquire the cache chain mutex then just give up - we'll try
3816 * again on the next iteration. 3816 * again on the next iteration.
3817 */ 3817 */
3818static void cache_reap(void *unused) 3818static void cache_reap(struct work_struct *unused)
3819{ 3819{
3820 struct kmem_cache *searchp; 3820 struct kmem_cache *searchp;
3821 struct kmem_list3 *l3; 3821 struct kmem_list3 *l3;
diff --git a/mm/swap.c b/mm/swap.c
index 2e0e871f542f..d9a3770d8f3c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -216,7 +216,7 @@ void lru_add_drain(void)
216} 216}
217 217
218#ifdef CONFIG_NUMA 218#ifdef CONFIG_NUMA
219static void lru_add_drain_per_cpu(void *dummy) 219static void lru_add_drain_per_cpu(struct work_struct *dummy)
220{ 220{
221 lru_add_drain(); 221 lru_add_drain();
222} 222}
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
226 */ 226 */
227int lru_add_drain_all(void) 227int lru_add_drain_all(void)
228{ 228{
229 return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL); 229 return schedule_on_each_cpu(lru_add_drain_per_cpu);
230} 230}
231 231
232#else 232#else
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5946ec63724f..3fc0abeeaf34 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
1454 1454
1455#define LEC_ARP_REFRESH_INTERVAL (3*HZ) 1455#define LEC_ARP_REFRESH_INTERVAL (3*HZ)
1456 1456
1457static void lec_arp_check_expire(void *data); 1457static void lec_arp_check_expire(struct work_struct *work);
1458static void lec_arp_expire_arp(unsigned long data); 1458static void lec_arp_expire_arp(unsigned long data);
1459 1459
1460/* 1460/*
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv)
1477 INIT_HLIST_HEAD(&priv->lec_no_forward); 1477 INIT_HLIST_HEAD(&priv->lec_no_forward);
1478 INIT_HLIST_HEAD(&priv->mcast_fwds); 1478 INIT_HLIST_HEAD(&priv->mcast_fwds);
1479 spin_lock_init(&priv->lec_arp_lock); 1479 spin_lock_init(&priv->lec_arp_lock);
1480 INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv); 1480 INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
1481 schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); 1481 schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
1482} 1482}
1483 1483
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data)
1875 * to ESI_FORWARD_DIRECT. This causes the flush period to end 1875 * to ESI_FORWARD_DIRECT. This causes the flush period to end
1876 * regardless of the progress of the flush protocol. 1876 * regardless of the progress of the flush protocol.
1877 */ 1877 */
1878static void lec_arp_check_expire(void *data) 1878static void lec_arp_check_expire(struct work_struct *work)
1879{ 1879{
1880 unsigned long flags; 1880 unsigned long flags;
1881 struct lec_priv *priv = data; 1881 struct lec_priv *priv =
1882 container_of(work, struct lec_priv, lec_arp_work.work);
1882 struct hlist_node *node, *next; 1883 struct hlist_node *node, *next;
1883 struct lec_arp_table *entry; 1884 struct lec_arp_table *entry;
1884 unsigned long now; 1885 unsigned long now;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 24cc95f86741..99136babd535 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@ struct lec_priv {
92 spinlock_t lec_arp_lock; 92 spinlock_t lec_arp_lock;
93 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ 93 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */
94 struct atm_vcc *lecd; 94 struct atm_vcc *lecd;
95 struct work_struct lec_arp_work; /* C10 */ 95 struct delayed_work lec_arp_work; /* C10 */
96 unsigned int maximum_unknown_frame_count; 96 unsigned int maximum_unknown_frame_count;
97 /* 97 /*
98 * Within the period of time defined by this variable, the client will send 98 * Within the period of time defined by this variable, the client will send
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a86e75..d4c935692ccf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev)
237 kfree(data); 237 kfree(data);
238} 238}
239 239
240static void add_conn(void *data) 240static void add_conn(struct work_struct *work)
241{ 241{
242 struct hci_conn *conn = data; 242 struct hci_conn *conn = container_of(work, struct hci_conn, work);
243 int i; 243 int i;
244 244
245 if (device_register(&conn->dev) < 0) { 245 if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
272 272
273 dev_set_drvdata(&conn->dev, conn); 273 dev_set_drvdata(&conn->dev, conn);
274 274
275 INIT_WORK(&conn->work, add_conn, (void *) conn); 275 INIT_WORK(&conn->work, add_conn);
276 276
277 schedule_work(&conn->work); 277 schedule_work(&conn->work);
278} 278}
279 279
280static void del_conn(void *data) 280static void del_conn(struct work_struct *work)
281{ 281{
282 struct hci_conn *conn = data; 282 struct hci_conn *conn = container_of(work, struct hci_conn, work);
283 device_del(&conn->dev); 283 device_del(&conn->dev);
284} 284}
285 285
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
287{ 287{
288 BT_DBG("conn %p", conn); 288 BT_DBG("conn %p", conn);
289 289
290 INIT_WORK(&conn->work, del_conn, (void *) conn); 290 INIT_WORK(&conn->work, del_conn);
291 291
292 schedule_work(&conn->work); 292 schedule_work(&conn->work);
293} 293}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40c11d2..55bb2634c088 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev)
77 * Called from work queue to allow for calling functions that 77 * Called from work queue to allow for calling functions that
78 * might sleep (such as speed check), and to debounce. 78 * might sleep (such as speed check), and to debounce.
79 */ 79 */
80static void port_carrier_check(void *arg) 80static void port_carrier_check(struct work_struct *work)
81{ 81{
82 struct net_device *dev = arg;
83 struct net_bridge_port *p; 82 struct net_bridge_port *p;
83 struct net_device *dev;
84 struct net_bridge *br; 84 struct net_bridge *br;
85 85
86 dev = container_of(work, struct net_bridge_port,
87 carrier_check.work)->dev;
88 work_release(work);
89
86 rtnl_lock(); 90 rtnl_lock();
87 p = dev->br_port; 91 p = dev->br_port;
88 if (!p) 92 if (!p)
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
276 p->port_no = index; 280 p->port_no = index;
277 br_init_port(p); 281 br_init_port(p);
278 p->state = BR_STATE_DISABLED; 282 p->state = BR_STATE_DISABLED;
279 INIT_WORK(&p->carrier_check, port_carrier_check, dev); 283 INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
280 br_stp_port_timer_init(p); 284 br_stp_port_timer_init(p);
281 285
282 kobject_init(&p->kobj); 286 kobject_init(&p->kobj);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d86f256..3a534e94c7f3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct net_bridge_port
82 struct timer_list hold_timer; 82 struct timer_list hold_timer;
83 struct timer_list message_age_timer; 83 struct timer_list message_age_timer;
84 struct kobject kobj; 84 struct kobject kobj;
85 struct work_struct carrier_check; 85 struct delayed_work carrier_check;
86 struct rcu_head rcu; 86 struct rcu_head rcu;
87}; 87};
88 88
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..549a2ce951b0 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@ enum lw_bits {
34static unsigned long linkwatch_flags; 34static unsigned long linkwatch_flags;
35static unsigned long linkwatch_nextevent; 35static unsigned long linkwatch_nextevent;
36 36
37static void linkwatch_event(void *dummy); 37static void linkwatch_event(struct work_struct *dummy);
38static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL); 38static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
39 39
40static LIST_HEAD(lweventlist); 40static LIST_HEAD(lweventlist);
41static DEFINE_SPINLOCK(lweventlist_lock); 41static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void)
127} 127}
128 128
129 129
130static void linkwatch_event(void *dummy) 130static void linkwatch_event(struct work_struct *dummy)
131{ 131{
132 /* Limit the number of linkwatch events to one 132 /* Limit the number of linkwatch events to one
133 * per second so that a runaway driver does not 133 * per second so that a runaway driver does not
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
171 unsigned long delay = linkwatch_nextevent - jiffies; 171 unsigned long delay = linkwatch_nextevent - jiffies;
172 172
173 /* If we wrap around we'll delay it by at most HZ. */ 173 /* If we wrap around we'll delay it by at most HZ. */
174 if (!delay || delay > HZ) 174 if (delay > HZ)
175 schedule_work(&linkwatch_work); 175 delay = 0;
176 else 176 schedule_delayed_work(&linkwatch_work, delay);
177 schedule_delayed_work(&linkwatch_work, delay);
178 } 177 }
179 } 178 }
180} 179}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3c58846fcaa5..b3c559b9ac35 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,9 +50,10 @@ static atomic_t trapped;
50static void zap_completion_queue(void); 50static void zap_completion_queue(void);
51static void arp_reply(struct sk_buff *skb); 51static void arp_reply(struct sk_buff *skb);
52 52
53static void queue_process(void *p) 53static void queue_process(struct work_struct *work)
54{ 54{
55 struct netpoll_info *npinfo = p; 55 struct netpoll_info *npinfo =
56 container_of(work, struct netpoll_info, tx_work.work);
56 struct sk_buff *skb; 57 struct sk_buff *skb;
57 58
58 while ((skb = skb_dequeue(&npinfo->txq))) { 59 while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@ static void queue_process(void *p)
72 schedule_delayed_work(&npinfo->tx_work, HZ/10); 73 schedule_delayed_work(&npinfo->tx_work, HZ/10);
73 return; 74 return;
74 } 75 }
75
76 netif_tx_unlock_bh(dev);
77 } 76 }
78} 77}
79 78
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
263 262
264 if (status != NETDEV_TX_OK) { 263 if (status != NETDEV_TX_OK) {
265 skb_queue_tail(&npinfo->txq, skb); 264 skb_queue_tail(&npinfo->txq, skb);
266 schedule_work(&npinfo->tx_work); 265 schedule_delayed_work(&npinfo->tx_work,0);
267 } 266 }
268} 267}
269 268
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np)
628 spin_lock_init(&npinfo->rx_lock); 627 spin_lock_init(&npinfo->rx_lock);
629 skb_queue_head_init(&npinfo->arp_tx); 628 skb_queue_head_init(&npinfo->arp_tx);
630 skb_queue_head_init(&npinfo->txq); 629 skb_queue_head_init(&npinfo->txq);
631 INIT_WORK(&npinfo->tx_work, queue_process, npinfo); 630 INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
632 631
633 atomic_set(&npinfo->refcnt, 1); 632 atomic_set(&npinfo->refcnt, 1);
634 } else { 633 } else {
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 7b52f2a03eef..4c9e26775f72 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = {
32 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 32 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
33 (unsigned long)&dccp_death_row), 33 (unsigned long)&dccp_death_row),
34 .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, 34 .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
35 inet_twdr_twkill_work, 35 inet_twdr_twkill_work),
36 &dccp_death_row),
37/* Short-time timewait calendar */ 36/* Short-time timewait calendar */
38 37
39 .twcal_hand = -1, 38 .twcal_hand = -1,
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87a971d..08386c102954 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
58} 58}
59 59
60void 60void
61ieee80211softmac_assoc_timeout(void *d) 61ieee80211softmac_assoc_timeout(struct work_struct *work)
62{ 62{
63 struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; 63 struct ieee80211softmac_device *mac =
64 container_of(work, struct ieee80211softmac_device,
65 associnfo.timeout.work);
64 struct ieee80211softmac_network *n; 66 struct ieee80211softmac_network *n;
65 67
66 mutex_lock(&mac->associnfo.mutex); 68 mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
186 188
187/* This function is called to handle userspace requests (asynchronously) */ 189/* This function is called to handle userspace requests (asynchronously) */
188void 190void
189ieee80211softmac_assoc_work(void *d) 191ieee80211softmac_assoc_work(struct work_struct *work)
190{ 192{
191 struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; 193 struct ieee80211softmac_device *mac =
194 container_of(work, struct ieee80211softmac_device,
195 associnfo.work.work);
192 struct ieee80211softmac_network *found = NULL; 196 struct ieee80211softmac_network *found = NULL;
193 struct ieee80211_network *net = NULL, *best = NULL; 197 struct ieee80211_network *net = NULL, *best = NULL;
194 int bssvalid; 198 int bssvalid;
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
412 network->authenticated = 0; 416 network->authenticated = 0;
413 /* we don't want to do this more than once ... */ 417 /* we don't want to do this more than once ... */
414 network->auth_desynced_once = 1; 418 network->auth_desynced_once = 1;
415 schedule_work(&mac->associnfo.work); 419 schedule_delayed_work(&mac->associnfo.work, 0);
416 break; 420 break;
417 } 421 }
418 default: 422 default:
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
446 ieee80211softmac_disassoc(mac); 450 ieee80211softmac_disassoc(mac);
447 451
448 /* try to reassociate */ 452 /* try to reassociate */
449 schedule_work(&mac->associnfo.work); 453 schedule_delayed_work(&mac->associnfo.work, 0);
450 454
451 return 0; 455 return 0;
452} 456}
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
466 dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); 470 dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
467 return 0; 471 return 0;
468 } 472 }
469 schedule_work(&mac->associnfo.work); 473 schedule_delayed_work(&mac->associnfo.work, 0);
470 474
471 return 0; 475 return 0;
472} 476}
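The two-level member paths above (associnfo.timeout.work, associnfo.work.work) follow from struct delayed_work embedding the work_struct that the handler is actually given, so container_of() must name that inner member. A sketch of the general rule; struct owner and its fields are hypothetical:

struct owner {
	struct delayed_work	retry;	/* retry.work is the embedded work_struct */
	int			attempts;
};

static void owner_retry(struct work_struct *work)
{
	struct owner *o = container_of(work, struct owner, retry.work);

	o->attempts++;
}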
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 0612015f1c78..6012705aa4f8 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
26 26
27#include "ieee80211softmac_priv.h" 27#include "ieee80211softmac_priv.h"
28 28
29static void ieee80211softmac_auth_queue(void *data); 29static void ieee80211softmac_auth_queue(struct work_struct *work);
30 30
31/* Queues an auth request to the desired AP */ 31/* Queues an auth request to the desired AP */
32int 32int
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
54 auth->mac = mac; 54 auth->mac = mac;
55 auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; 55 auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
56 auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; 56 auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
57 INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth); 57 INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
58 58
59 /* Lock (for list) */ 59 /* Lock (for list) */
60 spin_lock_irqsave(&mac->lock, flags); 60 spin_lock_irqsave(&mac->lock, flags);
61 61
62 /* add to list */ 62 /* add to list */
63 list_add_tail(&auth->list, &mac->auth_queue); 63 list_add_tail(&auth->list, &mac->auth_queue);
64 schedule_work(&auth->work); 64 schedule_delayed_work(&auth->work, 0);
65 spin_unlock_irqrestore(&mac->lock, flags); 65 spin_unlock_irqrestore(&mac->lock, flags);
66 66
67 return 0; 67 return 0;
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
70 70
71/* Sends an auth request to the desired AP and handles timeouts */ 71/* Sends an auth request to the desired AP and handles timeouts */
72static void 72static void
73ieee80211softmac_auth_queue(void *data) 73ieee80211softmac_auth_queue(struct work_struct *work)
74{ 74{
75 struct ieee80211softmac_device *mac; 75 struct ieee80211softmac_device *mac;
76 struct ieee80211softmac_auth_queue_item *auth; 76 struct ieee80211softmac_auth_queue_item *auth;
77 struct ieee80211softmac_network *net; 77 struct ieee80211softmac_network *net;
78 unsigned long flags; 78 unsigned long flags;
79 79
80 auth = (struct ieee80211softmac_auth_queue_item *)data; 80 auth = container_of(work, struct ieee80211softmac_auth_queue_item,
81 work.work);
81 net = auth->net; 82 net = auth->net;
82 mac = auth->mac; 83 mac = auth->mac;
83 84
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data)
118 119
119/* Sends a response to an auth challenge (for shared key auth). */ 120/* Sends a response to an auth challenge (for shared key auth). */
120static void 121static void
121ieee80211softmac_auth_challenge_response(void *_aq) 122ieee80211softmac_auth_challenge_response(struct work_struct *work)
122{ 123{
123 struct ieee80211softmac_auth_queue_item *aq = _aq; 124 struct ieee80211softmac_auth_queue_item *aq =
125 container_of(work, struct ieee80211softmac_auth_queue_item,
126 work.work);
124 127
125 /* Send our response */ 128 /* Send our response */
126 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); 129 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
234 * we have obviously already sent the initial auth 237 * we have obviously already sent the initial auth
235 * request. */ 238 * request. */
236 cancel_delayed_work(&aq->work); 239 cancel_delayed_work(&aq->work);
237 INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); 240 INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
238 schedule_work(&aq->work); 241 schedule_delayed_work(&aq->work, 0);
239 spin_unlock_irqrestore(&mac->lock, flags); 242 spin_unlock_irqrestore(&mac->lock, flags);
240 return 0; 243 return 0;
241 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: 244 case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -398,6 +401,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
398 ieee80211softmac_deauth_from_net(mac, net); 401 ieee80211softmac_deauth_from_net(mac, net);
399 402
400 /* let's try to re-associate */ 403 /* let's try to re-associate */
401 schedule_work(&mac->associnfo.work); 404 schedule_delayed_work(&mac->associnfo.work, 0);
402 return 0; 405 return 0;
403} 406}
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2ef666b..b9015656cfb3 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
73 73
74 74
75static void 75static void
76ieee80211softmac_notify_callback(void *d) 76ieee80211softmac_notify_callback(struct work_struct *work)
77{ 77{
78 struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d; 78 struct ieee80211softmac_event *pevent =
79 kfree(d); 79 container_of(work, struct ieee80211softmac_event, work.work);
80 struct ieee80211softmac_event event = *pevent;
81 kfree(pevent);
80 82
81 event.fun(event.mac->dev, event.event_type, event.context); 83 event.fun(event.mac->dev, event.event_type, event.context);
82} 84}
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
99 return -ENOMEM; 101 return -ENOMEM;
100 102
101 eventptr->event_type = event; 103 eventptr->event_type = event;
102 INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr); 104 INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
103 eventptr->fun = fun; 105 eventptr->fun = fun;
104 eventptr->context = context; 106 eventptr->context = context;
105 eventptr->mac = mac; 107 eventptr->mac = mac;
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
170 /* User may have subscribed to ANY event, so 172 /* User may have subscribed to ANY event, so
171 * we tell them which event triggered it. */ 173 * we tell them which event triggered it. */
172 eventptr->event_type = event; 174 eventptr->event_type = event;
173 schedule_work(&eventptr->work); 175 schedule_delayed_work(&eventptr->work, 0);
174 } 176 }
175 } 177 }
176} 178}
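ieee80211softmac_notify_callback() frees the structure that embeds its own work item, so the converted handler copies the event to the stack before kfree() and only then calls the notifier. A sketch of that one-shot, self-freeing shape; struct one_shot is hypothetical:

struct one_shot {
	struct delayed_work	work;
	void			(*fn)(void *arg);
	void			*arg;
};

static void one_shot_run(struct work_struct *work)
{
	struct one_shot *os = container_of(work, struct one_shot, work.work);
	struct one_shot copy = *os;	/* copy before the container disappears */

	kfree(os);
	copy.fn(copy.arg);
}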
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f4a471..256207b71dc9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
58 INIT_LIST_HEAD(&softmac->events); 58 INIT_LIST_HEAD(&softmac->events);
59 59
60 mutex_init(&softmac->associnfo.mutex); 60 mutex_init(&softmac->associnfo.mutex);
61 INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac); 61 INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
62 INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac); 62 INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
63 softmac->start_scan = ieee80211softmac_start_scan_implementation; 63 softmac->start_scan = ieee80211softmac_start_scan_implementation;
64 softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; 64 softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
65 softmac->stop_scan = ieee80211softmac_stop_scan_implementation; 65 softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e090b8a7..c0dbe070e548 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
78/* private definitions and prototypes */ 78/* private definitions and prototypes */
79 79
80/*** prototypes from _scan.c */ 80/*** prototypes from _scan.c */
81void ieee80211softmac_scan(void *sm); 81void ieee80211softmac_scan(struct work_struct *work);
82/* for internal use if scanning is needed */ 82/* for internal use if scanning is needed */
83int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); 83int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
84void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); 84void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au
149int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); 149int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
150 150
151/*** prototypes from _assoc.c */ 151/*** prototypes from _assoc.c */
152void ieee80211softmac_assoc_work(void *d); 152void ieee80211softmac_assoc_work(struct work_struct *work);
153int ieee80211softmac_handle_assoc_response(struct net_device * dev, 153int ieee80211softmac_handle_assoc_response(struct net_device * dev,
154 struct ieee80211_assoc_response * resp, 154 struct ieee80211_assoc_response * resp,
155 struct ieee80211_network * network); 155 struct ieee80211_network * network);
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
157 struct ieee80211_disassoc * disassoc); 157 struct ieee80211_disassoc * disassoc);
158int ieee80211softmac_handle_reassoc_req(struct net_device * dev, 158int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
159 struct ieee80211_reassoc_request * reassoc); 159 struct ieee80211_reassoc_request * reassoc);
160void ieee80211softmac_assoc_timeout(void *d); 160void ieee80211softmac_assoc_timeout(struct work_struct *work);
161void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); 161void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
162void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); 162void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
163 163
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item {
207 struct ieee80211softmac_device *mac; /* SoftMAC device */ 207 struct ieee80211softmac_device *mac; /* SoftMAC device */
208 u8 retry; /* Retry limit */ 208 u8 retry; /* Retry limit */
209 u8 state; /* Auth State */ 209 u8 state; /* Auth State */
210 struct work_struct work; /* Work queue */ 210 struct delayed_work work; /* Work queue */
211}; 211};
212 212
213/* scanning information */ 213/* scanning information */
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo {
219 stop:1; 219 stop:1;
220 u8 skip_flags; 220 u8 skip_flags;
221 struct completion finished; 221 struct completion finished;
222 struct work_struct softmac_scan; 222 struct delayed_work softmac_scan;
223 struct ieee80211softmac_device *mac;
223}; 224};
224 225
225/* private event struct */ 226/* private event struct */
@@ -227,7 +228,7 @@ struct ieee80211softmac_event {
227 struct list_head list; 228 struct list_head list;
228 int event_type; 229 int event_type;
229 void *event_context; 230 void *event_context;
230 struct work_struct work; 231 struct delayed_work work;
231 notify_function_ptr fun; 232 notify_function_ptr fun;
232 void *context; 233 void *context;
233 struct ieee80211softmac_device *mac; 234 struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 5507feab32de..0c85d6c24cdb 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
90 90
91 91
92/* internal scanning implementation follows */ 92/* internal scanning implementation follows */
93void ieee80211softmac_scan(void *d) 93void ieee80211softmac_scan(struct work_struct *work)
94{ 94{
95 int invalid_channel; 95 int invalid_channel;
96 u8 current_channel_idx; 96 u8 current_channel_idx;
97 struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d; 97 struct ieee80211softmac_scaninfo *si =
98 struct ieee80211softmac_scaninfo *si = sm->scaninfo; 98 container_of(work, struct ieee80211softmac_scaninfo,
99 softmac_scan.work);
100 struct ieee80211softmac_device *sm = si->mac;
99 unsigned long flags; 101 unsigned long flags;
100 102
101 while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { 103 while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802
146 struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); 148 struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
147 if (unlikely(!info)) 149 if (unlikely(!info))
148 return NULL; 150 return NULL;
149 INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac); 151 INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
152 info->mac = mac;
150 init_completion(&info->finished); 153 init_completion(&info->finished);
151 return info; 154 return info;
152} 155}
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
187 sm->scaninfo->started = 1; 190 sm->scaninfo->started = 1;
188 sm->scaninfo->stop = 0; 191 sm->scaninfo->stop = 0;
189 INIT_COMPLETION(sm->scaninfo->finished); 192 INIT_COMPLETION(sm->scaninfo->finished);
190 schedule_work(&sm->scaninfo->softmac_scan); 193 schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
191 spin_unlock_irqrestore(&sm->lock, flags); 194 spin_unlock_irqrestore(&sm->lock, flags);
192 return 0; 195 return 0;
193} 196}
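container_of() can only recover the structure that embeds the work item, so where the old handler was passed some other object (here the softmac device rather than the scaninfo), the conversion stores a back-pointer next to INIT_DELAYED_WORK(). A sketch with hypothetical names (struct scan_state, struct my_device):

struct my_device;

struct scan_state {
	struct delayed_work	work;
	struct my_device	*dev;	/* back-pointer for the handler */
};

static void scan_step(struct work_struct *work)
{
	struct scan_state *ss = container_of(work, struct scan_state, work.work);

	if (!ss->dev)
		return;
	/* advance the scan on ss->dev, rescheduling as needed */
}

static struct scan_state *scan_state_alloc(struct my_device *dev)
{
	struct scan_state *ss = kmalloc(sizeof(*ss), GFP_ATOMIC);

	if (!ss)
		return NULL;
	INIT_DELAYED_WORK(&ss->work, scan_step);
	ss->dev = dev;
	return ss;
}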
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a830f7d..2ffaebd21c53 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
122 122
123 sm->associnfo.associating = 1; 123 sm->associnfo.associating = 1;
124 /* queue lower level code to do work (if necessary) */ 124 /* queue lower level code to do work (if necessary) */
125 schedule_work(&sm->associnfo.work); 125 schedule_delayed_work(&sm->associnfo.work, 0);
126out: 126out:
127 mutex_unlock(&sm->associnfo.mutex); 127 mutex_unlock(&sm->associnfo.mutex);
128 128
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
356 /* force reassociation */ 356 /* force reassociation */
357 mac->associnfo.bssvalid = 0; 357 mac->associnfo.bssvalid = 0;
358 if (mac->associnfo.associated) 358 if (mac->associnfo.associated)
359 schedule_work(&mac->associnfo.work); 359 schedule_delayed_work(&mac->associnfo.work, 0);
360 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { 360 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
361 /* the bssid we have is no longer fixed */ 361 /* the bssid we have is no longer fixed */
362 mac->associnfo.bssfixed = 0; 362 mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
373 /* tell the other code that this bssid should be used no matter what */ 373 /* tell the other code that this bssid should be used no matter what */
374 mac->associnfo.bssfixed = 1; 374 mac->associnfo.bssfixed = 1;
375 /* queue associate if new bssid or (old one again and not associated) */ 375 /* queue associate if new bssid or (old one again and not associated) */
376 schedule_work(&mac->associnfo.work); 376 schedule_delayed_work(&mac->associnfo.work, 0);
377 } 377 }
378 378
379 out: 379 out:
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd805344c61..8c74f9168b7d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman);
197 197
198extern void twkill_slots_invalid(void); 198extern void twkill_slots_invalid(void);
199 199
200void inet_twdr_twkill_work(void *data) 200void inet_twdr_twkill_work(struct work_struct *work)
201{ 201{
202 struct inet_timewait_death_row *twdr = data; 202 struct inet_timewait_death_row *twdr =
203 container_of(work, struct inet_timewait_death_row, twkill_work);
203 int i; 204 int i;
204 205
205 if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) 206 if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
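For a plain work_struct (no delay) the container_of() path has a single member, and the static __WORK_INITIALIZER() used by the dccp and tcp death-row structures likewise drops its data argument. A sketch with hypothetical names:

struct reaper {
	int			pending;
	struct work_struct	kill_work;
};

static void reap(struct work_struct *work)
{
	struct reaper *r = container_of(work, struct reaper, kill_work);

	r->pending = 0;
}

static struct reaper the_reaper = {
	.kill_work = __WORK_INITIALIZER(the_reaper.kill_work, reap),
};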
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616e4602..9b933381ebbe 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@ static void update_defense_level(void)
221 * Timer for checking the defense 221 * Timer for checking the defense
222 */ 222 */
223#define DEFENSE_TIMER_PERIOD 1*HZ 223#define DEFENSE_TIMER_PERIOD 1*HZ
224static void defense_work_handler(void *data); 224static void defense_work_handler(struct work_struct *work);
225static DECLARE_WORK(defense_work, defense_work_handler, NULL); 225static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
226 226
227static void defense_work_handler(void *data) 227static void defense_work_handler(struct work_struct *work)
228{ 228{
229 update_defense_level(); 229 update_defense_level();
230 if (atomic_read(&ip_vs_dropentry)) 230 if (atomic_read(&ip_vs_dropentry))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6dddf59c1fb9..4a3889dd1943 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = {
45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
46 (unsigned long)&tcp_death_row), 46 (unsigned long)&tcp_death_row),
47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, 47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
48 inet_twdr_twkill_work, 48 inet_twdr_twkill_work),
49 &tcp_death_row),
50/* Short-time timewait calendar */ 49/* Short-time timewait calendar */
51 50
52 .twcal_hand = -1, 51 .twcal_hand = -1,
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a02030ad7..262bda808d96 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty);
61static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); 61static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
62static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); 62static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
63static void ircomm_tty_hangup(struct tty_struct *tty); 63static void ircomm_tty_hangup(struct tty_struct *tty);
64static void ircomm_tty_do_softint(void *private_); 64static void ircomm_tty_do_softint(struct work_struct *work);
65static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); 65static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
66static void ircomm_tty_stop(struct tty_struct *tty); 66static void ircomm_tty_stop(struct tty_struct *tty);
67 67
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
389 self->flow = FLOW_STOP; 389 self->flow = FLOW_STOP;
390 390
391 self->line = line; 391 self->line = line;
392 INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self); 392 INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
393 self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; 393 self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
394 self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; 394 self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
395 self->close_delay = 5*HZ/10; 395 self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
594} 594}
595 595
596/* 596/*
597 * Function ircomm_tty_do_softint (private_) 597 * Function ircomm_tty_do_softint (work)
598 * 598 *
599 * We use this routine to give the write wakeup to the user at at a 599 * We use this routine to give the write wakeup to the user at at a
600 * safe time (as fast as possible after write have completed). This 600 * safe time (as fast as possible after write have completed). This
601 * can be compared to the Tx interrupt. 601 * can be compared to the Tx interrupt.
602 */ 602 */
603static void ircomm_tty_do_softint(void *private_) 603static void ircomm_tty_do_softint(struct work_struct *work)
604{ 604{
605 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_; 605 struct ircomm_tty_cb *self =
606 container_of(work, struct ircomm_tty_cb, tqueue);
606 struct tty_struct *tty; 607 struct tty_struct *tty;
607 unsigned long flags; 608 unsigned long flags;
608 struct sk_buff *skb, *ctrl_skb; 609 struct sk_buff *skb, *ctrl_skb;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39471d3b31b9..ad0057db0f91 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
61#include <net/sctp/sm.h> 61#include <net/sctp/sm.h>
62 62
63/* Forward declarations for internal functions. */ 63/* Forward declarations for internal functions. */
64static void sctp_assoc_bh_rcv(struct sctp_association *asoc); 64static void sctp_assoc_bh_rcv(struct work_struct *work);
65 65
66 66
67/* 1st Level Abstractions. */ 67/* 1st Level Abstractions. */
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
269 269
270 /* Create an input queue. */ 270 /* Create an input queue. */
271 sctp_inq_init(&asoc->base.inqueue); 271 sctp_inq_init(&asoc->base.inqueue);
272 sctp_inq_set_th_handler(&asoc->base.inqueue, 272 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
273 (void (*)(void *))sctp_assoc_bh_rcv,
274 asoc);
275 273
276 /* Create an output queue. */ 274 /* Create an output queue. */
277 sctp_outq_init(asoc, &asoc->outqueue); 275 sctp_outq_init(asoc, &asoc->outqueue);
@@ -946,8 +944,11 @@ out:
946} 944}
947 945
948/* Do delayed input processing. This is scheduled by sctp_rcv(). */ 946/* Do delayed input processing. This is scheduled by sctp_rcv(). */
949static void sctp_assoc_bh_rcv(struct sctp_association *asoc) 947static void sctp_assoc_bh_rcv(struct work_struct *work)
950{ 948{
949 struct sctp_association *asoc =
950 container_of(work, struct sctp_association,
951 base.inqueue.immediate);
951 struct sctp_endpoint *ep; 952 struct sctp_endpoint *ep;
952 struct sctp_chunk *chunk; 953 struct sctp_chunk *chunk;
953 struct sock *sk; 954 struct sock *sk;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 33a42e90c32f..129756908da4 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
61#include <net/sctp/sm.h> 61#include <net/sctp/sm.h>
62 62
63/* Forward declarations for internal helpers. */ 63/* Forward declarations for internal helpers. */
64static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); 64static void sctp_endpoint_bh_rcv(struct work_struct *work);
65 65
66/* 66/*
67 * Initialize the base fields of the endpoint structure. 67 * Initialize the base fields of the endpoint structure.
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
89 sctp_inq_init(&ep->base.inqueue); 89 sctp_inq_init(&ep->base.inqueue);
90 90
91 /* Set its top-half handler */ 91 /* Set its top-half handler */
92 sctp_inq_set_th_handler(&ep->base.inqueue, 92 sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
93 (void (*)(void *))sctp_endpoint_bh_rcv, ep);
94 93
95 /* Initialize the bind addr area */ 94 /* Initialize the bind addr area */
96 sctp_bind_addr_init(&ep->base.bind_addr, 0); 95 sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
318/* Do delayed input processing. This is scheduled by sctp_rcv(). 317/* Do delayed input processing. This is scheduled by sctp_rcv().
319 * This may be called on BH or task time. 318 * This may be called on BH or task time.
320 */ 319 */
321static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) 320static void sctp_endpoint_bh_rcv(struct work_struct *work)
322{ 321{
322 struct sctp_endpoint *ep =
323 container_of(work, struct sctp_endpoint,
324 base.inqueue.immediate);
323 struct sctp_association *asoc; 325 struct sctp_association *asoc;
324 struct sock *sk; 326 struct sock *sk;
325 struct sctp_transport *transport; 327 struct sctp_transport *transport;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed7e849..71b07466e880 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue)
54 queue->in_progress = NULL; 54 queue->in_progress = NULL;
55 55
56 /* Create a task for delivering data. */ 56 /* Create a task for delivering data. */
57 INIT_WORK(&queue->immediate, NULL, NULL); 57 INIT_WORK(&queue->immediate, NULL);
58 58
59 queue->malloced = 0; 59 queue->malloced = 0;
60} 60}
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
97 * on the BH related data structures. 97 * on the BH related data structures.
98 */ 98 */
99 list_add_tail(&chunk->list, &q->in_chunk_list); 99 list_add_tail(&chunk->list, &q->in_chunk_list);
100 q->immediate.func(q->immediate.data); 100 q->immediate.func(&q->immediate);
101} 101}
102 102
103/* Extract a chunk from an SCTP inqueue. 103/* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
205 * The intent is that this routine will pull stuff out of the 205 * The intent is that this routine will pull stuff out of the
206 * inqueue and process it. 206 * inqueue and process it.
207 */ 207 */
208void sctp_inq_set_th_handler(struct sctp_inq *q, 208void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
209 void (*callback)(void *), void *arg)
210{ 209{
211 INIT_WORK(&q->immediate, callback, arg); 210 INIT_WORK(&q->immediate, callback);
212} 211}
213 212
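work_func_t is the typedef behind the new prototype, void (*)(struct work_struct *), which is why sctp_inq_set_th_handler() can lose its void *arg and why sctp_inq_push() now hands the handler the work item itself. A sketch of that direct-call arrangement; struct inq and the function names are hypothetical:

struct inq {
	struct work_struct	immediate;
};

static void inq_set_handler(struct inq *q, work_func_t cb)
{
	INIT_WORK(&q->immediate, cb);
}

static void inq_push(struct inq *q)
{
	/* run the registered handler synchronously, as sctp_inq_push() does */
	q->immediate.func(&q->immediate);
}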
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece03..d96fd466a9a4 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations;
284static struct file_operations content_file_operations; 284static struct file_operations content_file_operations;
285static struct file_operations cache_flush_operations; 285static struct file_operations cache_flush_operations;
286 286
287static void do_cache_clean(void *data); 287static void do_cache_clean(struct work_struct *work);
288static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL); 288static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
289 289
290void cache_register(struct cache_detail *cd) 290void cache_register(struct cache_detail *cd)
291{ 291{
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
337 spin_unlock(&cache_list_lock); 337 spin_unlock(&cache_list_lock);
338 338
339 /* start the cleaning process */ 339 /* start the cleaning process */
340 schedule_work(&cache_cleaner); 340 schedule_delayed_work(&cache_cleaner, 0);
341} 341}
342 342
343int cache_unregister(struct cache_detail *cd) 343int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@ static int cache_clean(void)
461/* 461/*
462 * We want to regularly clean the cache, so we need to schedule some work ... 462 * We want to regularly clean the cache, so we need to schedule some work ...
463 */ 463 */
464static void do_cache_clean(void *data) 464static void do_cache_clean(struct work_struct *work)
465{ 465{
466 int delay = 5; 466 int delay = 5;
467 if (cache_clean() == -1) 467 if (cache_clean() == -1)
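do_cache_clean() becomes a DECLARE_DELAYED_WORK() item and registration kicks it with a zero delay; a periodic job of this kind typically rearms itself from inside the handler. A minimal sketch of that self-rearming shape with hypothetical names; the delays actually used by the sunrpc cache are not shown in this hunk:

static void clean_tick(struct work_struct *work);
static DECLARE_DELAYED_WORK(cleaner, clean_tick);

static void clean_tick(struct work_struct *work)
{
	int delay = 5 * HZ;

	/* do one round of cleaning here, then rearm */
	schedule_delayed_work(&cleaner, delay);
}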
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f90..49dba5febbbd 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
54} 54}
55 55
56static void 56static void
57rpc_timeout_upcall_queue(void *data) 57rpc_timeout_upcall_queue(struct work_struct *work)
58{ 58{
59 LIST_HEAD(free_list); 59 LIST_HEAD(free_list);
60 struct rpc_inode *rpci = (struct rpc_inode *)data; 60 struct rpc_inode *rpci =
61 container_of(work, struct rpc_inode, queue_timeout.work);
61 struct inode *inode = &rpci->vfs_inode; 62 struct inode *inode = &rpci->vfs_inode;
62 void (*destroy_msg)(struct rpc_pipe_msg *); 63 void (*destroy_msg)(struct rpc_pipe_msg *);
63 64
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
837 INIT_LIST_HEAD(&rpci->pipe); 838 INIT_LIST_HEAD(&rpci->pipe);
838 rpci->pipelen = 0; 839 rpci->pipelen = 0;
839 init_waitqueue_head(&rpci->waitq); 840 init_waitqueue_head(&rpci->waitq);
840 INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci); 841 INIT_DELAYED_WORK(&rpci->queue_timeout,
842 rpc_timeout_upcall_queue);
841 rpci->ops = NULL; 843 rpci->ops = NULL;
842 } 844 }
843} 845}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a1ab4eed41f4..eff44bcdc95a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -41,7 +41,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
41 41
42static void __rpc_default_timer(struct rpc_task *task); 42static void __rpc_default_timer(struct rpc_task *task);
43static void rpciod_killall(void); 43static void rpciod_killall(void);
44static void rpc_async_schedule(void *); 44static void rpc_async_schedule(struct work_struct *);
45 45
46/* 46/*
47 * RPC tasks sit here while waiting for conditions to improve. 47 * RPC tasks sit here while waiting for conditions to improve.
@@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task)
305 if (RPC_IS_ASYNC(task)) { 305 if (RPC_IS_ASYNC(task)) {
306 int status; 306 int status;
307 307
308 INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); 308 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
309 status = queue_work(task->tk_workqueue, &task->u.tk_work); 309 status = queue_work(task->tk_workqueue, &task->u.tk_work);
310 if (status < 0) { 310 if (status < 0) {
311 printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); 311 printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task)
695 return __rpc_execute(task); 695 return __rpc_execute(task);
696} 696}
697 697
698static void rpc_async_schedule(void *arg) 698static void rpc_async_schedule(struct work_struct *work)
699{ 699{
700 __rpc_execute((struct rpc_task *)arg); 700 __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
701} 701}
702 702
703/** 703/**
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 80857470dc11..4f9a5d9791fb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -479,9 +479,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
479 return status; 479 return status;
480} 480}
481 481
482static void xprt_autoclose(void *args) 482static void xprt_autoclose(struct work_struct *work)
483{ 483{
484 struct rpc_xprt *xprt = (struct rpc_xprt *)args; 484 struct rpc_xprt *xprt =
485 container_of(work, struct rpc_xprt, task_cleanup);
485 486
486 xprt_disconnect(xprt); 487 xprt_disconnect(xprt);
487 xprt->ops->close(xprt); 488 xprt->ops->close(xprt);
@@ -932,7 +933,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si
932 933
933 INIT_LIST_HEAD(&xprt->free); 934 INIT_LIST_HEAD(&xprt->free);
934 INIT_LIST_HEAD(&xprt->recv); 935 INIT_LIST_HEAD(&xprt->recv);
935 INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); 936 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
936 init_timer(&xprt->timer); 937 init_timer(&xprt->timer);
937 xprt->timer.function = xprt_init_autodisconnect; 938 xprt->timer.function = xprt_init_autodisconnect;
938 xprt->timer.data = (unsigned long) xprt; 939 xprt->timer.data = (unsigned long) xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91ef25d..cfe3c15be948 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1060,13 +1060,14 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
1060 1060
1061/** 1061/**
1062 * xs_udp_connect_worker - set up a UDP socket 1062 * xs_udp_connect_worker - set up a UDP socket
1063 * @args: RPC transport to connect 1063 * @work: RPC transport to connect
1064 * 1064 *
1065 * Invoked by a work queue tasklet. 1065 * Invoked by a work queue tasklet.
1066 */ 1066 */
1067static void xs_udp_connect_worker(void *args) 1067static void xs_udp_connect_worker(struct work_struct *work)
1068{ 1068{
1069 struct rpc_xprt *xprt = (struct rpc_xprt *) args; 1069 struct rpc_xprt *xprt =
1070 container_of(work, struct rpc_xprt, connect_worker.work);
1070 struct socket *sock = xprt->sock; 1071 struct socket *sock = xprt->sock;
1071 int err, status = -EIO; 1072 int err, status = -EIO;
1072 1073
@@ -1144,13 +1145,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
1144 1145
1145/** 1146/**
1146 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint 1147 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
1147 * @args: RPC transport to connect 1148 * @work: RPC transport to connect
1148 * 1149 *
1149 * Invoked by a work queue tasklet. 1150 * Invoked by a work queue tasklet.
1150 */ 1151 */
1151static void xs_tcp_connect_worker(void *args) 1152static void xs_tcp_connect_worker(struct work_struct *work)
1152{ 1153{
1153 struct rpc_xprt *xprt = (struct rpc_xprt *)args; 1154 struct rpc_xprt *xprt =
1155 container_of(work, struct rpc_xprt, connect_worker.work);
1154 struct socket *sock = xprt->sock; 1156 struct socket *sock = xprt->sock;
1155 int err, status = -EIO; 1157 int err, status = -EIO;
1156 1158
@@ -1262,7 +1264,7 @@ static void xs_connect(struct rpc_task *task)
1262 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 1264 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
1263 } else { 1265 } else {
1264 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 1266 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
1265 schedule_work(&xprt->connect_worker); 1267 schedule_delayed_work(&xprt->connect_worker, 0);
1266 1268
1267 /* flush_scheduled_work can sleep... */ 1269 /* flush_scheduled_work can sleep... */
1268 if (!RPC_IS_ASYNC(task)) 1270 if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1377,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1375 /* XXX: header size can vary due to auth type, IPv6, etc. */ 1377 /* XXX: header size can vary due to auth type, IPv6, etc. */
1376 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 1378 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
1377 1379
1378 INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); 1380 INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker);
1379 xprt->bind_timeout = XS_BIND_TO; 1381 xprt->bind_timeout = XS_BIND_TO;
1380 xprt->connect_timeout = XS_UDP_CONN_TO; 1382 xprt->connect_timeout = XS_UDP_CONN_TO;
1381 xprt->reestablish_timeout = XS_UDP_REEST_TO; 1383 xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1422,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1420 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 1422 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
1421 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 1423 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
1422 1424
1423 INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); 1425 INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker);
1424 xprt->bind_timeout = XS_BIND_TO; 1426 xprt->bind_timeout = XS_BIND_TO;
1425 xprt->connect_timeout = XS_TCP_CONN_TO; 1427 xprt->connect_timeout = XS_TCP_CONN_TO;
1426 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 1428 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 64d3938f74c4..f6c77bd36fdd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
392 xfrm_pol_put(policy); 392 xfrm_pol_put(policy);
393} 393}
394 394
395static void xfrm_policy_gc_task(void *data) 395static void xfrm_policy_gc_task(struct work_struct *work)
396{ 396{
397 struct xfrm_policy *policy; 397 struct xfrm_policy *policy;
398 struct hlist_node *entry, *tmp; 398 struct hlist_node *entry, *tmp;
@@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total)
580 580
581static DEFINE_MUTEX(hash_resize_mutex); 581static DEFINE_MUTEX(hash_resize_mutex);
582 582
583static void xfrm_hash_resize(void *__unused) 583static void xfrm_hash_resize(struct work_struct *__unused)
584{ 584{
585 int dir, total; 585 int dir, total;
586 586
@@ -597,7 +597,7 @@ static void xfrm_hash_resize(void *__unused)
597 mutex_unlock(&hash_resize_mutex); 597 mutex_unlock(&hash_resize_mutex);
598} 598}
599 599
600static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); 600static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
601 601
602/* Generate new index... KAME seems to generate them ordered by cost 602/* Generate new index... KAME seems to generate them ordered by cost
603 * of an absolute inpredictability of ordering of rules. This will not pass. */ 603 * of an absolute inpredictability of ordering of rules. This will not pass. */
@@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void)
2116 panic("XFRM: failed to allocate bydst hash\n"); 2116 panic("XFRM: failed to allocate bydst hash\n");
2117 } 2117 }
2118 2118
2119 INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL); 2119 INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
2120 register_netdevice_notifier(&xfrm_dev_notifier); 2120 register_netdevice_notifier(&xfrm_dev_notifier);
2121} 2121}
2122 2122
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 864962bbda90..da54a64ccfa3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void)
115 115
116static DEFINE_MUTEX(hash_resize_mutex); 116static DEFINE_MUTEX(hash_resize_mutex);
117 117
118static void xfrm_hash_resize(void *__unused) 118static void xfrm_hash_resize(struct work_struct *__unused)
119{ 119{
120 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; 120 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
121 unsigned long nsize, osize; 121 unsigned long nsize, osize;
@@ -168,7 +168,7 @@ out_unlock:
168 mutex_unlock(&hash_resize_mutex); 168 mutex_unlock(&hash_resize_mutex);
169} 169}
170 170
171static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); 171static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
172 172
173DECLARE_WAIT_QUEUE_HEAD(km_waitq); 173DECLARE_WAIT_QUEUE_HEAD(km_waitq);
174EXPORT_SYMBOL(km_waitq); 174EXPORT_SYMBOL(km_waitq);
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
207 kfree(x); 207 kfree(x);
208} 208}
209 209
210static void xfrm_state_gc_task(void *data) 210static void xfrm_state_gc_task(struct work_struct *data)
211{ 211{
212 struct xfrm_state *x; 212 struct xfrm_state *x;
213 struct hlist_node *entry, *tmp; 213 struct hlist_node *entry, *tmp;
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void)
1568 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); 1568 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
1569 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); 1569 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
1570 1570
1571 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL); 1571 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
1572} 1572}
1573 1573
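The xfrm resize and garbage-collection jobs are file-scope work items whose handlers never needed per-item data, so DECLARE_WORK() now takes just the function and the handler simply ignores its work_struct argument. A sketch with hypothetical names:

static void resize_tables(struct work_struct *unused)
{
	/* operate on global hash-table state; no per-item context needed */
}

static DECLARE_WORK(resize_work, resize_tables);

static void maybe_resize(void)
{
	schedule_work(&resize_work);
}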
diff --git a/security/keys/key.c b/security/keys/key.c
index 80de8c3e9cc3..70eacbe5abde 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -30,8 +30,8 @@ DEFINE_SPINLOCK(key_user_lock);
30static LIST_HEAD(key_types_list); 30static LIST_HEAD(key_types_list);
31static DECLARE_RWSEM(key_types_sem); 31static DECLARE_RWSEM(key_types_sem);
32 32
33static void key_cleanup(void *data); 33static void key_cleanup(struct work_struct *work);
34static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL); 34static DECLARE_WORK(key_cleanup_task, key_cleanup);
35 35
36/* we serialise key instantiation and link */ 36/* we serialise key instantiation and link */
37DECLARE_RWSEM(key_construction_sem); 37DECLARE_RWSEM(key_construction_sem);
@@ -552,7 +552,7 @@ EXPORT_SYMBOL(key_negate_and_link);
552 * do cleaning up in process context so that we don't have to disable 552 * do cleaning up in process context so that we don't have to disable
553 * interrupts all over the place 553 * interrupts all over the place
554 */ 554 */
555static void key_cleanup(void *data) 555static void key_cleanup(struct work_struct *work)
556{ 556{
557 struct rb_node *_n; 557 struct rb_node *_n;
558 struct key *key; 558 struct key *key;
diff --git a/sound/aoa/aoa-gpio.h b/sound/aoa/aoa-gpio.h
index 3a61f3115573..ee64f5de8966 100644
--- a/sound/aoa/aoa-gpio.h
+++ b/sound/aoa/aoa-gpio.h
@@ -59,10 +59,10 @@ struct gpio_methods {
59}; 59};
60 60
61struct gpio_notification { 61struct gpio_notification {
62 struct delayed_work work;
62 notify_func_t notify; 63 notify_func_t notify;
63 void *data; 64 void *data;
64 void *gpio_private; 65 void *gpio_private;
65 struct work_struct work;
66 struct mutex mutex; 66 struct mutex mutex;
67}; 67};
68 68
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c
index 40eb47eccf9a..2b03bc798bcb 100644
--- a/sound/aoa/core/snd-aoa-gpio-feature.c
+++ b/sound/aoa/core/snd-aoa-gpio-feature.c
@@ -195,9 +195,10 @@ static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt)
195 ftr_gpio_set_lineout(rt, (s>>2)&1); 195 ftr_gpio_set_lineout(rt, (s>>2)&1);
196} 196}
197 197
198static void ftr_handle_notify(void *data) 198static void ftr_handle_notify(struct work_struct *work)
199{ 199{
200 struct gpio_notification *notif = data; 200 struct gpio_notification *notif =
201 container_of(work, struct gpio_notification, work.work);
201 202
202 mutex_lock(&notif->mutex); 203 mutex_lock(&notif->mutex);
203 if (notif->notify) 204 if (notif->notify)
@@ -253,12 +254,9 @@ static void ftr_gpio_init(struct gpio_runtime *rt)
253 254
254 ftr_gpio_all_amps_off(rt); 255 ftr_gpio_all_amps_off(rt);
255 rt->implementation_private = 0; 256 rt->implementation_private = 0;
256 INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify, 257 INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
257 &rt->headphone_notify); 258 INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
258 INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify, 259 INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
259 &rt->line_in_notify);
260 INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify,
261 &rt->line_out_notify);
262 mutex_init(&rt->headphone_notify.mutex); 260 mutex_init(&rt->headphone_notify.mutex);
263 mutex_init(&rt->line_in_notify.mutex); 261 mutex_init(&rt->line_in_notify.mutex);
264 mutex_init(&rt->line_out_notify.mutex); 262 mutex_init(&rt->line_out_notify.mutex);
@@ -287,7 +285,7 @@ static irqreturn_t ftr_handle_notify_irq(int xx, void *data)
287{ 285{
288 struct gpio_notification *notif = data; 286 struct gpio_notification *notif = data;
289 287
290 schedule_work(&notif->work); 288 schedule_delayed_work(&notif->work, 0);
291 289
292 return IRQ_HANDLED; 290 return IRQ_HANDLED;
293} 291}
diff --git a/sound/aoa/core/snd-aoa-gpio-pmf.c b/sound/aoa/core/snd-aoa-gpio-pmf.c
index 2836c3218391..5ca2220eac7d 100644
--- a/sound/aoa/core/snd-aoa-gpio-pmf.c
+++ b/sound/aoa/core/snd-aoa-gpio-pmf.c
@@ -69,9 +69,10 @@ static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt)
69 pmf_gpio_set_lineout(rt, (s>>2)&1); 69 pmf_gpio_set_lineout(rt, (s>>2)&1);
70} 70}
71 71
72static void pmf_handle_notify(void *data) 72static void pmf_handle_notify(struct work_struct *work)
73{ 73{
74 struct gpio_notification *notif = data; 74 struct gpio_notification *notif =
75 container_of(work, struct gpio_notification, work.work);
75 76
76 mutex_lock(&notif->mutex); 77 mutex_lock(&notif->mutex);
77 if (notif->notify) 78 if (notif->notify)
@@ -83,12 +84,9 @@ static void pmf_gpio_init(struct gpio_runtime *rt)
83{ 84{
84 pmf_gpio_all_amps_off(rt); 85 pmf_gpio_all_amps_off(rt);
85 rt->implementation_private = 0; 86 rt->implementation_private = 0;
86 INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify, 87 INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
87 &rt->headphone_notify); 88 INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
88 INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify, 89 INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
89 &rt->line_in_notify);
90 INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify,
91 &rt->line_out_notify);
92 mutex_init(&rt->headphone_notify.mutex); 90 mutex_init(&rt->headphone_notify.mutex);
93 mutex_init(&rt->line_in_notify.mutex); 91 mutex_init(&rt->line_in_notify.mutex);
94 mutex_init(&rt->line_out_notify.mutex); 92 mutex_init(&rt->line_out_notify.mutex);
@@ -129,7 +127,7 @@ static void pmf_handle_notify_irq(void *data)
129{ 127{
130 struct gpio_notification *notif = data; 128 struct gpio_notification *notif = data;
131 129
132 schedule_work(&notif->work); 130 schedule_delayed_work(&notif->work, 0);
133} 131}
134 132
135static int pmf_set_notify(struct gpio_runtime *rt, 133static int pmf_set_notify(struct gpio_runtime *rt,
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 12ffffc9e814..d2f2c5078e65 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
35 35
36#define AK4114_ADDR 0x00 /* fixed address */ 36#define AK4114_ADDR 0x00 /* fixed address */
37 37
38static void ak4114_stats(void *); 38static void ak4114_stats(struct work_struct *work);
39 39
40static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val) 40static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val)
41{ 41{
@@ -158,7 +158,7 @@ void snd_ak4114_reinit(struct ak4114 *chip)
158 reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN); 158 reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN);
159 /* bring up statistics / event queing */ 159 /* bring up statistics / event queing */
160 chip->init = 0; 160 chip->init = 0;
161 INIT_WORK(&chip->work, ak4114_stats, chip); 161 INIT_DELAYED_WORK(&chip->work, ak4114_stats);
162 queue_delayed_work(chip->workqueue, &chip->work, HZ / 10); 162 queue_delayed_work(chip->workqueue, &chip->work, HZ / 10);
163} 163}
164 164
@@ -561,9 +561,9 @@ int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags)
561 return res; 561 return res;
562} 562}
563 563
564static void ak4114_stats(void *data) 564static void ak4114_stats(struct work_struct *work)
565{ 565{
566 struct ak4114 *chip = (struct ak4114 *)data; 566 struct ak4114 *chip = container_of(work, struct ak4114, work.work);
567 567
568 if (chip->init) 568 if (chip->init)
569 return; 569 return;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6577b2325357..7abcb10b2754 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1927,9 +1927,10 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
1927static struct snd_ac97_build_ops null_build_ops; 1927static struct snd_ac97_build_ops null_build_ops;
1928 1928
1929#ifdef CONFIG_SND_AC97_POWER_SAVE 1929#ifdef CONFIG_SND_AC97_POWER_SAVE
1930static void do_update_power(void *data) 1930static void do_update_power(struct work_struct *work)
1931{ 1931{
1932 update_power_regs(data); 1932 update_power_regs(
1933 container_of(work, struct snd_ac97, power_work.work));
1933} 1934}
1934#endif 1935#endif
1935 1936
@@ -1989,7 +1990,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
1989 mutex_init(&ac97->page_mutex); 1990 mutex_init(&ac97->page_mutex);
1990#ifdef CONFIG_SND_AC97_POWER_SAVE 1991#ifdef CONFIG_SND_AC97_POWER_SAVE
1991 ac97->power_workq = create_workqueue("ac97"); 1992 ac97->power_workq = create_workqueue("ac97");
1992 INIT_WORK(&ac97->power_work, do_update_power, ac97); 1993 INIT_DELAYED_WORK(&ac97->power_work, do_update_power);
1993#endif 1994#endif
1994 1995
1995#ifdef CONFIG_PCI 1996#ifdef CONFIG_PCI
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9c3d7ac08068..71482c15a852 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -272,10 +272,11 @@ EXPORT_SYMBOL(snd_hda_queue_unsol_event);
272/* 272/*
273 * process queueud unsolicited events 273 * process queueud unsolicited events
274 */ 274 */
275static void process_unsol_events(void *data) 275static void process_unsol_events(struct work_struct *work)
276{ 276{
277 struct hda_bus *bus = data; 277 struct hda_bus_unsolicited *unsol =
278 struct hda_bus_unsolicited *unsol = bus->unsol; 278 container_of(work, struct hda_bus_unsolicited, work);
279 struct hda_bus *bus = unsol->bus;
279 struct hda_codec *codec; 280 struct hda_codec *codec;
280 unsigned int rp, caddr, res; 281 unsigned int rp, caddr, res;
281 282
@@ -314,7 +315,8 @@ static int init_unsol_queue(struct hda_bus *bus)
314 kfree(unsol); 315 kfree(unsol);
315 return -ENOMEM; 316 return -ENOMEM;
316 } 317 }
317 INIT_WORK(&unsol->work, process_unsol_events, bus); 318 INIT_WORK(&unsol->work, process_unsol_events);
319 unsol->bus = bus;
318 bus->unsol = unsol; 320 bus->unsol = unsol;
319 return 0; 321 return 0;
320} 322}
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index f9416c36396e..9ca1baf860bd 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -206,6 +206,7 @@ struct hda_bus_unsolicited {
206 /* workqueue */ 206 /* workqueue */
207 struct workqueue_struct *workq; 207 struct workqueue_struct *workq;
208 struct work_struct work; 208 struct work_struct work;
209 struct hda_bus *bus;
209}; 210};
210 211
211/* 212/*
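As in the softmac scan code, the object the handler really wants (the hda bus) does not embed the work item, so a back-pointer is added to struct hda_bus_unsolicited and filled in where INIT_WORK() is done. A sketch with hypothetical names (struct unsol_queue, struct my_bus):

struct my_bus;

struct unsol_queue {
	struct workqueue_struct	*wq;
	struct work_struct	work;
	struct my_bus		*bus;	/* set next to INIT_WORK() */
};

static void unsol_process(struct work_struct *work)
{
	struct unsol_queue *uq = container_of(work, struct unsol_queue, work);

	if (!uq->bus)
		return;
	/* drain queued unsolicited events for uq->bus */
}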
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index fd3590fcaedb..2d40cc72f236 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -219,35 +219,15 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
219static int pdacf_config(struct pcmcia_device *link) 219static int pdacf_config(struct pcmcia_device *link)
220{ 220{
221 struct snd_pdacf *pdacf = link->priv; 221 struct snd_pdacf *pdacf = link->priv;
222 tuple_t tuple;
223 cisparse_t *parse = NULL;
224 u_short buf[32];
225 int last_fn, last_ret; 222 int last_fn, last_ret;
226 223
227 snd_printdd(KERN_DEBUG "pdacf_config called\n"); 224 snd_printdd(KERN_DEBUG "pdacf_config called\n");
228 parse = kmalloc(sizeof(*parse), GFP_KERNEL);
229 if (! parse) {
230 snd_printk(KERN_ERR "pdacf_config: cannot allocate\n");
231 return -ENOMEM;
232 }
233 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
234 tuple.Attributes = 0;
235 tuple.TupleData = (cisdata_t *)buf;
236 tuple.TupleDataMax = sizeof(buf);
237 tuple.TupleOffset = 0;
238 tuple.DesiredTuple = CISTPL_CONFIG;
239 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
240 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
241 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
242 link->conf.ConfigBase = parse->config.base;
243 link->conf.ConfigIndex = 0x5; 225 link->conf.ConfigIndex = 0x5;
244 226
245 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); 227 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
246 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 228 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
247 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 229 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
248 230
249 kfree(parse);
250
251 if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq.AssignedIRQ) < 0) 231 if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
252 goto failed; 232 goto failed;
253 233
@@ -255,7 +235,6 @@ static int pdacf_config(struct pcmcia_device *link)
255 return 0; 235 return 0;
256 236
257cs_failed: 237cs_failed:
258 kfree(parse);
259 cs_error(link, last_fn, last_ret); 238 cs_error(link, last_fn, last_ret);
260failed: 239failed:
261 pcmcia_disable_device(link); 240 pcmcia_disable_device(link);
@@ -299,7 +278,8 @@ static int pdacf_resume(struct pcmcia_device *link)
299 * Module entry points 278 * Module entry points
300 */ 279 */
301static struct pcmcia_device_id snd_pdacf_ids[] = { 280static struct pcmcia_device_id snd_pdacf_ids[] = {
302 PCMCIA_DEVICE_MANF_CARD(0x015d, 0x4c45), 281 /* this is too general PCMCIA_DEVICE_MANF_CARD(0x015d, 0x4c45), */
282 PCMCIA_DEVICE_PROD_ID12("Core Sound","PDAudio-CF",0x396d19d2,0x71717b49),
303 PCMCIA_DEVICE_NULL 283 PCMCIA_DEVICE_NULL
304}; 284};
305MODULE_DEVICE_TABLE(pcmcia, snd_pdacf_ids); 285MODULE_DEVICE_TABLE(pcmcia, snd_pdacf_ids);
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 3089fcca800e..d7df59e9c647 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -217,34 +217,12 @@ static int vxpocket_config(struct pcmcia_device *link)
217{ 217{
218 struct vx_core *chip = link->priv; 218 struct vx_core *chip = link->priv;
219 struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip; 219 struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
220 tuple_t tuple;
221 cisparse_t *parse;
222 u_short buf[32];
223 int last_fn, last_ret; 220 int last_fn, last_ret;
224 221
225 snd_printdd(KERN_DEBUG "vxpocket_config called\n"); 222 snd_printdd(KERN_DEBUG "vxpocket_config called\n");
226 parse = kmalloc(sizeof(*parse), GFP_KERNEL);
227 if (! parse) {
228 snd_printk(KERN_ERR "vx: cannot allocate\n");
229 return -ENOMEM;
230 }
231 tuple.Attributes = 0;
232 tuple.TupleData = (cisdata_t *)buf;
233 tuple.TupleDataMax = sizeof(buf);
234 tuple.TupleOffset = 0;
235 tuple.DesiredTuple = CISTPL_CONFIG;
236 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
237 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
238 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
239 link->conf.ConfigBase = parse->config.base;
240 link->conf.Present = parse->config.rmask[0];
241 223
242 /* redefine hardware record according to the VERSION1 string */ 224 /* redefine hardware record according to the VERSION1 string */
243 tuple.DesiredTuple = CISTPL_VERS_1; 225 if (!strcmp(link->prod_id[1], "VX-POCKET")) {
244 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
245 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
246 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
247 if (! strcmp(parse->version_1.str + parse->version_1.ofs[1], "VX-POCKET")) {
248 snd_printdd("VX-pocket is detected\n"); 226 snd_printdd("VX-pocket is detected\n");
249 } else { 227 } else {
250 snd_printdd("VX-pocket 440 is detected\n"); 228 snd_printdd("VX-pocket 440 is detected\n");
@@ -265,14 +243,12 @@ static int vxpocket_config(struct pcmcia_device *link)
265 goto failed; 243 goto failed;
266 244
267 link->dev_node = &vxp->node; 245 link->dev_node = &vxp->node;
268 kfree(parse);
269 return 0; 246 return 0;
270 247
271cs_failed: 248cs_failed:
272 cs_error(link, last_fn, last_ret); 249 cs_error(link, last_fn, last_ret);
273failed: 250failed:
274 pcmcia_disable_device(link); 251 pcmcia_disable_device(link);
275 kfree(parse);
276 return -ENODEV; 252 return -ENODEV;
277} 253}
278 254
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 2fbe1d183fce..8f074c7936e6 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -942,10 +942,11 @@ static void check_mute(struct snd_pmac *chip, struct pmac_gpio *gp, int val, int
942} 942}
943 943
944static struct work_struct device_change; 944static struct work_struct device_change;
945static struct snd_pmac *device_change_chip;
945 946
946static void device_change_handler(void *self) 947static void device_change_handler(struct work_struct *work)
947{ 948{
948 struct snd_pmac *chip = self; 949 struct snd_pmac *chip = device_change_chip;
949 struct pmac_tumbler *mix; 950 struct pmac_tumbler *mix;
950 int headphone, lineout; 951 int headphone, lineout;
951 952
@@ -1417,7 +1418,8 @@ int __init snd_pmac_tumbler_init(struct snd_pmac *chip)
1417 chip->resume = tumbler_resume; 1418 chip->resume = tumbler_resume;
1418#endif 1419#endif
1419 1420
1420 INIT_WORK(&device_change, device_change_handler, (void *)chip); 1421 INIT_WORK(&device_change, device_change_handler);
1422 device_change_chip = chip;
1421 1423
1422#ifdef PMAC_SUPPORT_AUTOMUTE 1424#ifdef PMAC_SUPPORT_AUTOMUTE
1423 if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0) 1425 if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)
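tumbler's work item is a file-scope variable, so the chip pointer that used to travel as the INIT_WORK() data argument becomes a second static set alongside INIT_WORK(). A sketch with hypothetical names:

struct my_chip;

static struct work_struct hotplug_work;
static struct my_chip *hotplug_chip;

static void hotplug_handler(struct work_struct *work)
{
	struct my_chip *chip = hotplug_chip;	/* was the data argument */

	if (!chip)
		return;
	/* re-check headphone/lineout state on chip */
}

static void hotplug_init(struct my_chip *chip)
{
	INIT_WORK(&hotplug_work, hotplug_handler);
	hotplug_chip = chip;
}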