Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/Makefile | 3
-rw-r--r-- drivers/acpi/acpica/Makefile | 158
-rw-r--r-- drivers/acpi/acpica/accommon.h | 2
-rw-r--r-- drivers/acpi/acpica/acconfig.h | 9
-rw-r--r-- drivers/acpi/acpica/acdebug.h | 2
-rw-r--r-- drivers/acpi/acpica/acdispat.h | 2
-rw-r--r-- drivers/acpi/acpica/acevents.h | 3
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 17
-rw-r--r-- drivers/acpi/acpica/achware.h | 2
-rw-r--r-- drivers/acpi/acpica/acinterp.h | 4
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 26
-rw-r--r-- drivers/acpi/acpica/acmacros.h | 2
-rw-r--r-- drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r-- drivers/acpi/acpica/acobject.h | 8
-rw-r--r-- drivers/acpi/acpica/acopcode.h | 6
-rw-r--r-- drivers/acpi/acpica/acparser.h | 2
-rw-r--r-- drivers/acpi/acpica/acpredef.h | 41
-rw-r--r-- drivers/acpi/acpica/acresrc.h | 115
-rw-r--r-- drivers/acpi/acpica/acstruct.h | 2
-rw-r--r-- drivers/acpi/acpica/actables.h | 2
-rw-r--r-- drivers/acpi/acpica/acutils.h | 21
-rw-r--r-- drivers/acpi/acpica/amlcode.h | 29
-rw-r--r-- drivers/acpi/acpica/amlresrc.h | 138
-rw-r--r-- drivers/acpi/acpica/dsargs.c | 18
-rw-r--r-- drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r-- drivers/acpi/acpica/dsfield.c | 83
-rw-r--r-- drivers/acpi/acpica/dsinit.c | 2
-rw-r--r-- drivers/acpi/acpica/dsmethod.c | 2
-rw-r--r-- drivers/acpi/acpica/dsmthdat.c | 2
-rw-r--r-- drivers/acpi/acpica/dsobject.c | 2
-rw-r--r-- drivers/acpi/acpica/dsopcode.c | 2
-rw-r--r-- drivers/acpi/acpica/dsutils.c | 2
-rw-r--r-- drivers/acpi/acpica/dswexec.c | 2
-rw-r--r-- drivers/acpi/acpica/dswload.c | 2
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 2
-rw-r--r-- drivers/acpi/acpica/dswscope.c | 2
-rw-r--r-- drivers/acpi/acpica/dswstate.c | 2
-rw-r--r-- drivers/acpi/acpica/evevent.c | 14
-rw-r--r-- drivers/acpi/acpica/evglock.c | 8
-rw-r--r-- drivers/acpi/acpica/evgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeblk.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeutil.c | 2
-rw-r--r-- drivers/acpi/acpica/evmisc.c | 2
-rw-r--r-- drivers/acpi/acpica/evregion.c | 31
-rw-r--r-- drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r-- drivers/acpi/acpica/evsci.c | 2
-rw-r--r-- drivers/acpi/acpica/evxface.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfevnt.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfregn.c | 2
-rw-r--r-- drivers/acpi/acpica/exconfig.c | 8
-rw-r--r-- drivers/acpi/acpica/exconvrt.c | 2
-rw-r--r-- drivers/acpi/acpica/excreate.c | 31
-rw-r--r-- drivers/acpi/acpica/exdebug.c | 2
-rw-r--r-- drivers/acpi/acpica/exdump.c | 9
-rw-r--r-- drivers/acpi/acpica/exfield.c | 30
-rw-r--r-- drivers/acpi/acpica/exfldio.c | 38
-rw-r--r-- drivers/acpi/acpica/exmisc.c | 2
-rw-r--r-- drivers/acpi/acpica/exmutex.c | 2
-rw-r--r-- drivers/acpi/acpica/exnames.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg1.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg3.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r-- drivers/acpi/acpica/exprep.c | 27
-rw-r--r-- drivers/acpi/acpica/exregion.c | 2
-rw-r--r-- drivers/acpi/acpica/exresnte.c | 2
-rw-r--r-- drivers/acpi/acpica/exresolv.c | 2
-rw-r--r-- drivers/acpi/acpica/exresop.c | 2
-rw-r--r-- drivers/acpi/acpica/exstore.c | 2
-rw-r--r-- drivers/acpi/acpica/exstoren.c | 2
-rw-r--r-- drivers/acpi/acpica/exstorob.c | 2
-rw-r--r-- drivers/acpi/acpica/exsystem.c | 2
-rw-r--r-- drivers/acpi/acpica/exutils.c | 27
-rw-r--r-- drivers/acpi/acpica/hwacpi.c | 2
-rw-r--r-- drivers/acpi/acpica/hwgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/hwpci.c | 2
-rw-r--r-- drivers/acpi/acpica/hwregs.c | 2
-rw-r--r-- drivers/acpi/acpica/hwsleep.c | 2
-rw-r--r-- drivers/acpi/acpica/hwtimer.c | 2
-rw-r--r-- drivers/acpi/acpica/hwvalid.c | 4
-rw-r--r-- drivers/acpi/acpica/hwxface.c | 2
-rw-r--r-- drivers/acpi/acpica/nsaccess.c | 2
-rw-r--r-- drivers/acpi/acpica/nsalloc.c | 2
-rw-r--r-- drivers/acpi/acpica/nsdump.c | 2
-rw-r--r-- drivers/acpi/acpica/nsdumpdv.c | 2
-rw-r--r-- drivers/acpi/acpica/nseval.c | 2
-rw-r--r-- drivers/acpi/acpica/nsinit.c | 2
-rw-r--r-- drivers/acpi/acpica/nsload.c | 2
-rw-r--r-- drivers/acpi/acpica/nsnames.c | 2
-rw-r--r-- drivers/acpi/acpica/nsobject.c | 2
-rw-r--r-- drivers/acpi/acpica/nsparse.c | 2
-rw-r--r-- drivers/acpi/acpica/nspredef.c | 31
-rw-r--r-- drivers/acpi/acpica/nsrepair.c | 3
-rw-r--r-- drivers/acpi/acpica/nsrepair2.c | 7
-rw-r--r-- drivers/acpi/acpica/nssearch.c | 2
-rw-r--r-- drivers/acpi/acpica/nsutils.c | 2
-rw-r--r-- drivers/acpi/acpica/nswalk.c | 2
-rw-r--r-- drivers/acpi/acpica/nsxfeval.c | 2
-rw-r--r-- drivers/acpi/acpica/nsxfname.c | 2
-rw-r--r-- drivers/acpi/acpica/nsxfobj.c | 2
-rw-r--r-- drivers/acpi/acpica/psargs.c | 143
-rw-r--r-- drivers/acpi/acpica/psloop.c | 2
-rw-r--r-- drivers/acpi/acpica/psopcode.c | 15
-rw-r--r-- drivers/acpi/acpica/psparse.c | 2
-rw-r--r-- drivers/acpi/acpica/psscope.c | 2
-rw-r--r-- drivers/acpi/acpica/pstree.c | 8
-rw-r--r-- drivers/acpi/acpica/psutils.c | 2
-rw-r--r-- drivers/acpi/acpica/pswalk.c | 2
-rw-r--r-- drivers/acpi/acpica/psxface.c | 2
-rw-r--r-- drivers/acpi/acpica/rsaddr.c | 2
-rw-r--r-- drivers/acpi/acpica/rscalc.c | 89
-rw-r--r-- drivers/acpi/acpica/rscreate.c | 69
-rw-r--r-- drivers/acpi/acpica/rsdump.c | 196
-rw-r--r-- drivers/acpi/acpica/rsinfo.c | 58
-rw-r--r-- drivers/acpi/acpica/rsio.c | 2
-rw-r--r-- drivers/acpi/acpica/rsirq.c | 33
-rw-r--r-- drivers/acpi/acpica/rslist.c | 77
-rw-r--r-- drivers/acpi/acpica/rsmemory.c | 2
-rw-r--r-- drivers/acpi/acpica/rsmisc.c | 269
-rw-r--r-- drivers/acpi/acpica/rsserial.c | 441
-rw-r--r-- drivers/acpi/acpica/rsutils.c | 56
-rw-r--r-- drivers/acpi/acpica/rsxface.c | 52
-rw-r--r-- drivers/acpi/acpica/tbfadt.c | 41
-rw-r--r-- drivers/acpi/acpica/tbfind.c | 2
-rw-r--r-- drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r-- drivers/acpi/acpica/tbutils.c | 9
-rw-r--r-- drivers/acpi/acpica/tbxface.c | 2
-rw-r--r-- drivers/acpi/acpica/tbxfroot.c | 2
-rw-r--r-- drivers/acpi/acpica/utaddress.c | 294
-rw-r--r-- drivers/acpi/acpica/utalloc.c | 2
-rw-r--r-- drivers/acpi/acpica/utcopy.c | 2
-rw-r--r-- drivers/acpi/acpica/utdebug.c | 2
-rw-r--r-- drivers/acpi/acpica/utdecode.c | 6
-rw-r--r-- drivers/acpi/acpica/utdelete.c | 15
-rw-r--r-- drivers/acpi/acpica/uteval.c | 2
-rw-r--r-- drivers/acpi/acpica/utglobal.c | 8
-rw-r--r-- drivers/acpi/acpica/utids.c | 2
-rw-r--r-- drivers/acpi/acpica/utinit.c | 3
-rw-r--r-- drivers/acpi/acpica/utlock.c | 2
-rw-r--r-- drivers/acpi/acpica/utmath.c | 2
-rw-r--r-- drivers/acpi/acpica/utmisc.c | 2
-rw-r--r-- drivers/acpi/acpica/utmutex.c | 11
-rw-r--r-- drivers/acpi/acpica/utobject.c | 2
-rw-r--r-- drivers/acpi/acpica/utosi.c | 2
-rw-r--r-- drivers/acpi/acpica/utresrc.c | 278
-rw-r--r-- drivers/acpi/acpica/utstate.c | 2
-rw-r--r-- drivers/acpi/acpica/utxface.c | 40
-rw-r--r-- drivers/acpi/acpica/utxferror.c | 2
-rw-r--r-- drivers/acpi/acpica/utxfmutex.c | 187
-rw-r--r-- drivers/acpi/apei/apei-base.c | 150
-rw-r--r-- drivers/acpi/apei/apei-internal.h | 6
-rw-r--r-- drivers/acpi/apei/einj.c | 290
-rw-r--r-- drivers/acpi/apei/erst.c | 5
-rw-r--r-- drivers/acpi/apei/ghes.c | 102
-rw-r--r-- drivers/acpi/apei/hest.c | 5
-rw-r--r-- drivers/acpi/atomicio.c | 77
-rw-r--r-- drivers/acpi/numa.c | 6
-rw-r--r-- drivers/acpi/nvs.c | 53
-rw-r--r-- drivers/acpi/osl.c | 242
-rw-r--r-- drivers/acpi/processor_core.c | 26
-rw-r--r-- drivers/acpi/processor_driver.c | 20
-rw-r--r-- drivers/ata/ata_piix.c | 7
-rw-r--r-- drivers/ata/libata-core.c | 2
-rw-r--r-- drivers/ata/libata-transport.c | 1
-rw-r--r-- drivers/ata/pata_bf54x.c | 167
-rw-r--r-- drivers/ata/sata_fsl.c | 11
-rw-r--r-- drivers/bcma/bcma_private.h | 1
-rw-r--r-- drivers/bcma/host_pci.c | 43
-rw-r--r-- drivers/bcma/main.c | 24
-rw-r--r-- drivers/block/Kconfig | 11
-rw-r--r-- drivers/block/Makefile | 1
-rw-r--r-- drivers/block/nvme.c | 1739
-rw-r--r-- drivers/char/tpm/tpm.c | 9
-rw-r--r-- drivers/char/tpm/tpm.h | 3
-rw-r--r-- drivers/dma/Kconfig | 27
-rw-r--r-- drivers/dma/Makefile | 1
-rw-r--r-- drivers/dma/amba-pl08x.c | 41
-rw-r--r-- drivers/dma/at_hdmac.c | 103
-rw-r--r-- drivers/dma/at_hdmac_regs.h | 1
-rw-r--r-- drivers/dma/coh901318.c | 12
-rw-r--r-- drivers/dma/coh901318_lli.c | 23
-rw-r--r-- drivers/dma/coh901318_lli.h | 4
-rw-r--r-- drivers/dma/dmaengine.c | 4
-rw-r--r-- drivers/dma/dw_dmac.c | 83
-rw-r--r-- drivers/dma/dw_dmac_regs.h | 1
-rw-r--r-- drivers/dma/ep93xx_dma.c | 90
-rw-r--r-- drivers/dma/fsldma.c | 4
-rw-r--r-- drivers/dma/imx-dma.c | 10
-rw-r--r-- drivers/dma/imx-sdma.c | 27
-rw-r--r-- drivers/dma/intel_mid_dma.c | 39
-rw-r--r-- drivers/dma/intel_mid_dma_regs.h | 4
-rw-r--r-- drivers/dma/iop-adma.c | 16
-rw-r--r-- drivers/dma/ipu/ipu_idmac.c | 29
-rw-r--r-- drivers/dma/mpc512x_dma.c | 12
-rw-r--r-- drivers/dma/mxs-dma.c | 53
-rw-r--r-- drivers/dma/pch_dma.c | 20
-rw-r--r-- drivers/dma/pl330.c | 31
-rw-r--r-- drivers/dma/shdma.c | 72
-rw-r--r-- drivers/dma/sirf-dma.c | 707
-rw-r--r-- drivers/dma/ste_dma40.c | 441
-rw-r--r-- drivers/dma/ste_dma40_ll.h | 11
-rw-r--r-- drivers/dma/timb_dma.c | 30
-rw-r--r-- drivers/dma/txx9dmac.c | 12
-rw-r--r-- drivers/i2c/busses/Kconfig | 14
-rw-r--r-- drivers/i2c/busses/i2c-eg20t.c | 19
-rw-r--r-- drivers/i2c/busses/i2c-omap.c | 110
-rw-r--r-- drivers/idle/intel_idle.c | 96
-rw-r--r-- drivers/infiniband/Kconfig | 1
-rw-r--r-- drivers/infiniband/Makefile | 1
-rw-r--r-- drivers/infiniband/ulp/srpt/Kconfig | 12
-rw-r--r-- drivers/infiniband/ulp/srpt/Makefile | 2
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_dm_mad.h | 139
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 4073
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.h | 444
-rw-r--r-- drivers/media/common/tuners/tuner-xc2028.c | 27
-rw-r--r-- drivers/media/common/tuners/xc4000.c | 86
-rw-r--r-- drivers/media/dvb/dvb-core/dvb_frontend.c | 41
-rw-r--r-- drivers/media/dvb/dvb-usb/anysee.c | 20
-rw-r--r-- drivers/media/dvb/dvb-usb/dib0700.h | 2
-rw-r--r-- drivers/media/dvb/dvb-usb/dib0700_core.c | 1
-rw-r--r-- drivers/media/dvb/dvb-usb/dib0700_devices.c | 150
-rw-r--r-- drivers/media/dvb/frontends/cxd2820r_core.c | 10
-rw-r--r-- drivers/media/dvb/frontends/ds3000.c | 2
-rw-r--r-- drivers/media/dvb/frontends/mb86a20s.c | 8
-rw-r--r-- drivers/media/dvb/frontends/tda18271c2dd.c | 1
-rw-r--r-- drivers/media/video/as3645a.c | 1
-rw-r--r-- drivers/media/video/cx18/cx18-fileops.c | 41
-rw-r--r-- drivers/media/video/cx231xx/cx231xx-cards.c | 2
-rw-r--r-- drivers/media/video/cx23885/cx23885-cards.c | 4
-rw-r--r-- drivers/media/video/cx23885/cx23885-dvb.c | 5
-rw-r--r-- drivers/media/video/cx23885/cx23885-video.c | 7
-rw-r--r-- drivers/media/video/cx88/cx88-cards.c | 24
-rw-r--r-- drivers/media/video/ivtv/ivtv-driver.c | 3
-rw-r--r-- drivers/media/video/ivtv/ivtv-driver.h | 3
-rw-r--r-- drivers/media/video/ivtv/ivtv-fileops.c | 118
-rw-r--r-- drivers/media/video/ivtv/ivtv-ioctl.c | 22
-rw-r--r-- drivers/media/video/ivtv/ivtv-irq.c | 4
-rw-r--r-- drivers/media/video/ivtv/ivtv-streams.c | 2
-rw-r--r-- drivers/media/video/ivtv/ivtv-yuv.c | 22
-rw-r--r-- drivers/media/video/mx3_camera.c | 2
-rw-r--r-- drivers/media/video/omap/omap_vout.c | 7
-rw-r--r-- drivers/media/video/pwc/pwc-ctrl.c | 239
-rw-r--r-- drivers/media/video/pwc/pwc-dec1.c | 16
-rw-r--r-- drivers/media/video/pwc/pwc-dec1.h | 6
-rw-r--r-- drivers/media/video/pwc/pwc-dec23.c | 41
-rw-r--r-- drivers/media/video/pwc/pwc-dec23.h | 9
-rw-r--r-- drivers/media/video/pwc/pwc-if.c | 175
-rw-r--r-- drivers/media/video/pwc/pwc-misc.c | 1
-rw-r--r-- drivers/media/video/pwc/pwc-v4l.c | 90
-rw-r--r-- drivers/media/video/pwc/pwc.h | 14
-rw-r--r-- drivers/media/video/s5p-fimc/fimc-capture.c | 7
-rw-r--r-- drivers/media/video/s5p-fimc/fimc-core.c | 6
-rw-r--r-- drivers/media/video/s5p-fimc/fimc-mdevice.c | 1
-rw-r--r-- drivers/media/video/s5p-g2d/g2d.c | 1
-rw-r--r-- drivers/media/video/s5p-jpeg/jpeg-core.c | 7
-rw-r--r-- drivers/media/video/s5p-mfc/s5p_mfc.c | 3
-rw-r--r-- drivers/media/video/s5p-mfc/s5p_mfc_dec.c | 2
-rw-r--r-- drivers/media/video/saa7164/saa7164-cards.c | 4
-rw-r--r-- drivers/media/video/timblogiw.c | 2
-rw-r--r-- drivers/media/video/tlg2300/pd-main.c | 4
-rw-r--r-- drivers/media/video/v4l2-ctrls.c | 54
-rw-r--r-- drivers/media/video/v4l2-ioctl.c | 8
-rw-r--r-- drivers/media/video/zoran/zoran_driver.c | 1
-rw-r--r-- drivers/misc/carma/carma-fpga-program.c | 2
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 10
-rw-r--r-- drivers/mmc/host/mmci.c | 11
-rw-r--r-- drivers/mmc/host/mxcmmc.c | 10
-rw-r--r-- drivers/mmc/host/mxs-mmc.c | 10
-rw-r--r-- drivers/mmc/host/sh_mmcif.c | 4
-rw-r--r-- drivers/mmc/host/tmio_mmc_dma.c | 4
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 22
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 298
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 27
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 32
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 14
-rw-r--r-- drivers/net/ethernet/micrel/ks8842.c | 4
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_mac.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.c | 5
-rw-r--r-- drivers/net/wireless/b43/main.c | 5
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r-- drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 40
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2200.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-scan.c | 2
-rw-r--r-- drivers/net/wireless/mwl8k.c | 4
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800pci.c | 28
-rw-r--r-- drivers/scsi/Kconfig | 5
-rw-r--r-- drivers/scsi/bfa/bfa_defs_svc.h | 7
-rw-r--r-- drivers/scsi/bfa/bfa_fc.h | 155
-rw-r--r-- drivers/scsi/bfa/bfa_fcpim.c | 416
-rw-r--r-- drivers/scsi/bfa/bfa_fcpim.h | 7
-rw-r--r-- drivers/scsi/bfa/bfa_svc.h | 5
-rw-r--r-- drivers/scsi/bfa/bfad.c | 2
-rw-r--r-- drivers/scsi/bfa/bfad_attr.c | 2
-rw-r--r-- drivers/scsi/bfa/bfad_bsg.c | 27
-rw-r--r-- drivers/scsi/bfa/bfad_drv.h | 2
-rw-r--r-- drivers/scsi/bfa/bfad_im.c | 56
-rw-r--r-- drivers/scsi/bfa/bfad_im.h | 27
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 5
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 5
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 46
-rw-r--r-- drivers/scsi/fcoe/fcoe.h | 4
-rw-r--r-- drivers/scsi/hpsa.c | 2
-rw-r--r-- drivers/scsi/isci/firmware/Makefile | 19
-rw-r--r-- drivers/scsi/isci/firmware/README | 36
-rw-r--r-- drivers/scsi/isci/firmware/create_fw.c | 99
-rw-r--r-- drivers/scsi/isci/firmware/create_fw.h | 77
-rw-r--r-- drivers/scsi/isci/host.c | 340
-rw-r--r-- drivers/scsi/isci/host.h | 27
-rw-r--r-- drivers/scsi/isci/init.c | 25
-rw-r--r-- drivers/scsi/isci/isci.h | 1
-rw-r--r-- drivers/scsi/isci/phy.c | 172
-rw-r--r-- drivers/scsi/isci/port.c | 104
-rw-r--r-- drivers/scsi/isci/port.h | 10
-rw-r--r-- drivers/scsi/isci/port_config.c | 35
-rw-r--r-- drivers/scsi/isci/probe_roms.c | 2
-rw-r--r-- drivers/scsi/isci/probe_roms.h | 89
-rw-r--r-- drivers/scsi/isci/remote_device.c | 10
-rw-r--r-- drivers/scsi/isci/task.c | 2
-rw-r--r-- drivers/scsi/isci/task.h | 7
-rw-r--r-- drivers/scsi/libfc/fc_disc.c | 6
-rw-r--r-- drivers/scsi/libfc/fc_elsct.c | 1
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 4
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 5
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 10
-rw-r--r-- drivers/scsi/megaraid.c | 13
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 145
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 4
-rw-r--r-- drivers/scsi/qla4xxx/ql4_def.h | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_init.c | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 7
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.c | 5
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.h | 22
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 511
-rw-r--r-- drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r-- drivers/scsi/scsi_lib.c | 7
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 3
-rw-r--r-- drivers/scsi/sg.c | 25
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r-- drivers/spi/spi-dw-mid.c | 8
-rw-r--r-- drivers/spi/spi-ep93xx.c | 9
-rw-r--r-- drivers/spi/spi-pl022.c | 8
-rw-r--r-- drivers/spi/spi-topcliff-pch.c | 4
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 19
-rw-r--r-- drivers/target/iscsi/iscsi_target_auth.c | 36
-rw-r--r-- drivers/target/iscsi/iscsi_target_configfs.c | 11
-rw-r--r-- drivers/target/iscsi/iscsi_target_device.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl0.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl1.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl2.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 23
-rw-r--r-- drivers/target/iscsi/iscsi_target_nego.c | 4
-rw-r--r-- drivers/target/iscsi/iscsi_target_nodeattrib.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target_stat.c | 17
-rw-r--r-- drivers/target/iscsi/iscsi_target_tmr.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_tpg.c | 6
-rw-r--r-- drivers/target/iscsi/iscsi_target_util.c | 8
-rw-r--r-- drivers/target/loopback/tcm_loop.c | 33
-rw-r--r-- drivers/target/loopback/tcm_loop.h | 11
-rw-r--r-- drivers/target/target_core_alua.c | 7
-rw-r--r-- drivers/target/target_core_cdb.c | 39
-rw-r--r-- drivers/target/target_core_cdb.h | 14
-rw-r--r-- drivers/target/target_core_configfs.c | 34
-rw-r--r-- drivers/target/target_core_device.c | 15
-rw-r--r-- drivers/target/target_core_fabric_configfs.c | 8
-rw-r--r-- drivers/target/target_core_fabric_lib.c | 13
-rw-r--r-- drivers/target/target_core_file.c | 15
-rw-r--r-- drivers/target/target_core_hba.c | 7
-rw-r--r-- drivers/target/target_core_hba.h | 7
-rw-r--r-- drivers/target/target_core_iblock.c | 7
-rw-r--r-- drivers/target/target_core_internal.h | 123
-rw-r--r-- drivers/target/target_core_pr.c | 24
-rw-r--r-- drivers/target/target_core_pr.h | 2
-rw-r--r-- drivers/target/target_core_pscsi.c | 20
-rw-r--r-- drivers/target/target_core_rd.c | 6
-rw-r--r-- drivers/target/target_core_stat.c | 9
-rw-r--r-- drivers/target/target_core_stat.h | 8
-rw-r--r-- drivers/target/target_core_tmr.c | 36
-rw-r--r-- drivers/target/target_core_tpg.c | 9
-rw-r--r-- drivers/target/target_core_transport.c | 343
-rw-r--r-- drivers/target/target_core_ua.c | 6
-rw-r--r-- drivers/target/tcm_fc/tfc_cmd.c | 57
-rw-r--r-- drivers/target/tcm_fc/tfc_conf.c | 6
-rw-r--r-- drivers/target/tcm_fc/tfc_io.c | 5
-rw-r--r-- drivers/target/tcm_fc/tfc_sess.c | 5
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 8
-rw-r--r-- drivers/tty/serial/pch_uart.c | 4
-rw-r--r-- drivers/tty/serial/sh-sci.c | 4
-rw-r--r-- drivers/usb/host/ehci-xilinx-of.c | 2
-rw-r--r-- drivers/usb/musb/ux500_dma.c | 4
-rw-r--r-- drivers/usb/renesas_usbhs/fifo.c | 4
-rw-r--r-- drivers/vhost/net.c | 8
-rw-r--r-- drivers/video/mx3fb.c | 65
-rw-r--r-- drivers/xen/biomerge.c | 2
-rw-r--r-- drivers/xen/xen-balloon.c | 2
403 files changed, 14806 insertions, 4173 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ecb26b4f29a..c07f44f05f9 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -20,11 +20,12 @@ obj-y += acpi.o \
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y += osl.o utils.o reboot.o
 acpi-y += atomicio.o
+acpi-y += nvs.o

 # sleep related files
 acpi-y += wakeup.o
 acpi-y += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o
+acpi-$(CONFIG_ACPI_SLEEP) += proc.o


 #
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 301bd2d388a..0ca208b6dcf 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -8,41 +8,151 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
 # use acpi.o to put all files here into acpi.o modparam namespace
 obj-y += acpi.o

-acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
-	dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
-	dsinit.o dsargs.o dscontrol.o dswload2.o
+acpi-y := \
+	dsargs.o \
+	dscontrol.o \
+	dsfield.o \
+	dsinit.o \
+	dsmethod.o \
+	dsmthdat.o \
+	dsobject.o \
+	dsopcode.o \
+	dsutils.o \
+	dswexec.o \
+	dswload.o \
+	dswload2.o \
+	dswscope.o \
+	dswstate.o

-acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
-	evmisc.o evrgnini.o evxface.o evxfregn.o \
-	evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
+acpi-y += \
+	evevent.o \
+	evgpe.o \
+	evgpeblk.o \
+	evgpeinit.o \
+	evgpeutil.o \
+	evglock.o \
+	evmisc.o \
+	evregion.o \
+	evrgnini.o \
+	evsci.o \
+	evxface.o \
+	evxfevnt.o \
+	evxfgpe.o \
+	evxfregn.o

-acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
-	exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
-	excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
-	exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
+acpi-y += \
+	exconfig.o \
+	exconvrt.o \
+	excreate.o \
+	exdebug.o \
+	exdump.o \
+	exfield.o \
+	exfldio.o \
+	exmutex.o \
+	exnames.o \
+	exoparg1.o \
+	exoparg2.o \
+	exoparg3.o \
+	exoparg6.o \
+	exprep.o \
+	exmisc.o \
+	exregion.o \
+	exresnte.o \
+	exresolv.o \
+	exresop.o \
+	exstore.o \
+	exstoren.o \
+	exstorob.o \
+	exsystem.o \
+	exutils.o

-acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o
+acpi-y += \
+	hwacpi.o \
+	hwgpe.o \
+	hwpci.o \
+	hwregs.o \
+	hwsleep.o \
+	hwvalid.o \
+	hwxface.o

 acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o

-acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
-	nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
-	nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
-	nsparse.o nspredef.o nsrepair.o nsrepair2.o
+acpi-y += \
+	nsaccess.o \
+	nsalloc.o \
+	nsdump.o \
+	nseval.o \
+	nsinit.o \
+	nsload.o \
+	nsnames.o \
+	nsobject.o \
+	nsparse.o \
+	nspredef.o \
+	nsrepair.o \
+	nsrepair2.o \
+	nssearch.o \
+	nsutils.o \
+	nswalk.o \
+	nsxfeval.o \
+	nsxfname.o \
+	nsxfobj.o

 acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o

-acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \
-	psopcode.o psscope.o psutils.o psxface.o
+acpi-y += \
+	psargs.o \
+	psloop.o \
+	psopcode.o \
+	psparse.o \
+	psscope.o \
+	pstree.o \
+	psutils.o \
+	pswalk.o \
+	psxface.o

-acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
-	rscalc.o rsirq.o rsmemory.o rsutils.o
+acpi-y += \
+	rsaddr.o \
+	rscalc.o \
+	rscreate.o \
+	rsinfo.o \
+	rsio.o \
+	rsirq.o \
+	rslist.o \
+	rsmemory.o \
+	rsmisc.o \
+	rsserial.o \
+	rsutils.o \
+	rsxface.o

 acpi-$(ACPI_FUTURE_USAGE) += rsdump.o

-acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+acpi-y += \
+	tbfadt.o \
+	tbfind.o \
+	tbinstal.o \
+	tbutils.o \
+	tbxface.o \
+	tbxfroot.o

-acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
-	utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
-	utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
-	utosi.o utxferror.o utdecode.o
+acpi-y += \
+	utaddress.o \
+	utalloc.o \
+	utcopy.o \
+	utdebug.o \
+	utdecode.o \
+	utdelete.o \
+	uteval.o \
+	utglobal.o \
+	utids.o \
+	utinit.o \
+	utlock.o \
+	utmath.o \
+	utmisc.o \
+	utmutex.o \
+	utobject.o \
+	utosi.o \
+	utresrc.o \
+	utstate.o \
+	utxface.o \
+	utxferror.o \
+	utxfmutex.o
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index e0ba17f0a7c..a44bd424f9f 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index f895a244ca7..1f30af613e8 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -123,6 +123,10 @@

 #define ACPI_MAX_SLEEP                  2000	/* Two seconds */

+/* Address Range lists are per-space_id (Memory and I/O only) */
+
+#define ACPI_ADDRESS_RANGE_MAX          2
+
 /******************************************************************************
  *
  * ACPI Specification constants (Do not change unless the specification changes)
@@ -202,9 +206,10 @@
 #define ACPI_RSDP_CHECKSUM_LENGTH       20
 #define ACPI_RSDP_XCHECKSUM_LENGTH      36

-/* SMBus and IPMI bidirectional buffer size */
+/* SMBus, GSBus and IPMI bidirectional buffer size */

 #define ACPI_SMBUS_BUFFER_SIZE          34
+#define ACPI_GSBUS_BUFFER_SIZE          34
 #define ACPI_IPMI_BUFFER_SIZE           66

 /* _sx_d and _sx_w control methods */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index eb0b1f8dee6..deaa8197956 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 2d1b7ffa377..5935ba6707e 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index bea3b489918..c53caa521a3 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -162,6 +162,7 @@ acpi_status acpi_ev_initialize_op_regions(void);

 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+			       union acpi_operand_object *field_obj,
 			       u32 function,
 			       u32 region_offset, u32 bit_width, u64 *value);

diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index e6652d716e4..2853f7673f3 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -140,8 +140,19 @@ u32 acpi_gbl_trace_flags;
 acpi_name acpi_gbl_trace_method_name;
 u8 acpi_gbl_system_awake_and_running;

+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
+
 #endif

+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
+
 /*****************************************************************************
  *
  * Debug support
@@ -207,7 +218,7 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;

 /*****************************************************************************
  *
- * Mutual exlusion within ACPICA subsystem
+ * Mutual exclusion within ACPICA subsystem
  *
  ****************************************************************************/

@@ -295,6 +306,8 @@ ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
 ACPI_EXTERN u8 acpi_gbl_events_initialized;
 ACPI_EXTERN u8 acpi_gbl_osi_data;
 ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
+ACPI_EXTERN struct acpi_address_range
+    *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];

 #ifndef DEFINE_ACPI_GLOBALS

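The comment introduced above explains the ACPI 5.0 hardware-reduced model: the FADT carries a flag, and ACPICA mirrors it in acpi_gbl_reduced_hardware so that hardware touch-points can test one cached byte instead of re-reading the table. A minimal standalone C sketch of that cache-a-FADT-flag pattern follows; the struct, flag bit, and names are illustrative stand-ins, not ACPICA's real FADT definitions.

```c
#include <stdio.h>

/* Illustrative stand-in for the FADT flags word; the real ACPICA
 * table definitions live in actbl.h and differ in detail. */
struct fake_fadt {
	unsigned int flags;
};
#define FAKE_HW_REDUCED_ACPI (1u << 20)	/* hypothetical flag bit */

/* Mirrors the role of acpi_gbl_reduced_hardware */
static unsigned char gbl_reduced_hardware;

/* Cache the flag once, at table-initialization time */
static void init_from_fadt(const struct fake_fadt *fadt)
{
	gbl_reduced_hardware = (fadt->flags & FAKE_HW_REDUCED_ACPI) ? 1 : 0;
}

int main(void)
{
	struct fake_fadt fadt = { .flags = FAKE_HW_REDUCED_ACPI };

	init_from_fadt(&fadt);
	/* Later, hardware code paths test the cached byte */
	if (gbl_reduced_hardware)
		printf("reduced-hardware machine: skip FACS/SCI/PM setup\n");
	return 0;
}
```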
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index e7213beaafc..677793e938f 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 3731e1c34b8..eb308635da7 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);

 void acpi_ex_integer_to_string(char *dest, u64 value);

+u8 acpi_is_valid_space_id(u8 space_id);
+
 /*
  * exregion - default op_region handlers
  */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 5552125d834..3f24068837d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;

 /* Total number of aml opcodes defined */

-#define AML_NUM_OPCODES                 0x7F
+#define AML_NUM_OPCODES                 0x81

 /* Forward declarations */

@@ -249,12 +249,16 @@ struct acpi_create_field_info {
 	struct acpi_namespace_node *field_node;
 	struct acpi_namespace_node *register_node;
 	struct acpi_namespace_node *data_register_node;
+	struct acpi_namespace_node *connection_node;
+	u8 *resource_buffer;
 	u32 bank_value;
 	u32 field_bit_position;
 	u32 field_bit_length;
+	u16 resource_length;
 	u8 field_flags;
 	u8 attribute;
 	u8 field_type;
+	u8 access_length;
 };

 typedef
@@ -315,7 +319,8 @@ struct acpi_name_info {

 /*
  * Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
- * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
+ * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT,
+ * ACPI_PTYPE2_FIX_VAR
  */
 struct acpi_package_info {
 	u8 type;
@@ -625,6 +630,15 @@ union acpi_generic_state {

 typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);

+/* Address Range info block */
+
+struct acpi_address_range {
+	struct acpi_address_range *next;
+	struct acpi_namespace_node *region_node;
+	acpi_physical_address start_address;
+	acpi_physical_address end_address;
+};
+
 /*****************************************************************************
  *
  * Parser typedefs and structs
@@ -951,7 +965,7 @@ struct acpi_port_info {
 #define ACPI_RESOURCE_NAME_END_DEPENDENT        0x38
 #define ACPI_RESOURCE_NAME_IO                   0x40
 #define ACPI_RESOURCE_NAME_FIXED_IO             0x48
-#define ACPI_RESOURCE_NAME_RESERVED_S1          0x50
+#define ACPI_RESOURCE_NAME_FIXED_DMA            0x50
 #define ACPI_RESOURCE_NAME_RESERVED_S2          0x58
 #define ACPI_RESOURCE_NAME_RESERVED_S3          0x60
 #define ACPI_RESOURCE_NAME_RESERVED_S4          0x68
@@ -973,7 +987,9 @@ struct acpi_port_info {
 #define ACPI_RESOURCE_NAME_EXTENDED_IRQ         0x89
 #define ACPI_RESOURCE_NAME_ADDRESS64            0x8A
 #define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64   0x8B
-#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8B
+#define ACPI_RESOURCE_NAME_GPIO                 0x8C
+#define ACPI_RESOURCE_NAME_SERIAL_BUS           0x8E
+#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8E

 /*****************************************************************************
  *
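The struct acpi_address_range added above is a singly linked node, and acglobal.h now keeps one list head per tracked address space in acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX]; the new utaddress.c (294 lines in the diffstat) maintains and queries those lists to flag op-regions that collide with ranges in use elsewhere. A self-contained C sketch of that kind of per-space range tracking, using hypothetical names rather than ACPICA's actual utaddress.c API:

```c
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long phys_addr;	/* stand-in for acpi_physical_address */

/* Same shape as the new list node (region_node omitted here) */
struct address_range {
	struct address_range *next;
	phys_addr start;
	phys_addr end;
};

#define RANGE_LIST_MAX 2	/* mirrors ACPI_ADDRESS_RANGE_MAX: memory + I/O */
static struct address_range *range_list[RANGE_LIST_MAX];

/* Prepend a tracked range to the per-space list as a region is created */
static int add_range(unsigned space, phys_addr start, phys_addr end)
{
	struct address_range *r = malloc(sizeof(*r));

	if (!r)
		return -1;
	r->start = start;
	r->end = end;
	r->next = range_list[space];
	range_list[space] = r;
	return 0;
}

/* Count tracked ranges that overlap [start, end] — the kind of check
 * performed when a new op-region is defined over a claimed range */
static unsigned check_range(unsigned space, phys_addr start, phys_addr end)
{
	struct address_range *r;
	unsigned overlaps = 0;

	for (r = range_list[space]; r; r = r->next)
		if (start <= r->end && end >= r->start)
			overlaps++;
	return overlaps;
}

int main(void)
{
	add_range(0, 0x1000, 0x1FFF);
	printf("overlaps: %u\n", check_range(0, 0x1800, 0x2000)); /* prints 1 */
	return 0;
}
```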
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index b7491ee1fba..ef338a96f5b 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 79a598c67fe..2c9e0f04952 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 1055769f2f0..c065078ca83 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
 	u32 base_byte_offset;	/* Byte offset within containing object */\
 	u32 value;		/* Value to store into the Bank or Index register */\
 	u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+	u8 access_length;	/* For serial regions/fields */


 struct acpi_object_field_common {	/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
@@ -261,7 +262,9 @@ struct acpi_object_field_common {	/* COMMON FIELD (for BUFFER, REGION, BANK, and
 };

 struct acpi_object_region_field {
-	ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj;	/* Containing op_region object */
+	ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+	union acpi_operand_object *region_obj;	/* Containing op_region object */
+	u8 *resource_buffer;	/* resource_template for serial regions/fields */
 };

 struct acpi_object_bank_field {
@@ -358,6 +361,7 @@ typedef enum {
  */
 struct acpi_object_extra {
 	ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG;	/* _REG method for this region (if any) */
+	struct acpi_namespace_node *scope_node;
 	void *region_context;	/* Region-specific data */
 	u8 *aml_start;
 	u32 aml_length;
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index bb2ccfad737..9440d053fbb 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,7 @@
 #define ARGP_CONCAT_OP                  ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
 #define ARGP_CONCAT_RES_OP              ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
 #define ARGP_COND_REF_OF_OP             ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
+#define ARGP_CONNECTFIELD_OP            ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_CONTINUE_OP                ARG_NONE
 #define ARGP_COPY_OP                    ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
 #define ARGP_CREATE_BIT_FIELD_OP        ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
@@ -164,6 +165,7 @@
 #define ARGP_RETURN_OP                  ARGP_LIST1 (ARGP_TERMARG)
 #define ARGP_REVISION_OP                ARG_NONE
 #define ARGP_SCOPE_OP                   ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST)
+#define ARGP_SERIALFIELD_OP             ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_SHIFT_LEFT_OP              ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
 #define ARGP_SHIFT_RIGHT_OP             ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
 #define ARGP_SIGNAL_OP                  ARGP_LIST1 (ARGP_SUPERNAME)
@@ -223,6 +225,7 @@
 #define ARGI_CONCAT_OP                  ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF)
 #define ARGI_CONCAT_RES_OP              ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
 #define ARGI_COND_REF_OF_OP             ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
+#define ARGI_CONNECTFIELD_OP            ARGI_INVALID_OPCODE
 #define ARGI_CONTINUE_OP                ARGI_INVALID_OPCODE
 #define ARGI_COPY_OP                    ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET)
 #define ARGI_CREATE_BIT_FIELD_OP        ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
@@ -294,6 +297,7 @@
 #define ARGI_RETURN_OP                  ARGI_INVALID_OPCODE
 #define ARGI_REVISION_OP                ARG_NONE
 #define ARGI_SCOPE_OP                   ARGI_INVALID_OPCODE
+#define ARGI_SERIALFIELD_OP             ARGI_INVALID_OPCODE
 #define ARGI_SHIFT_LEFT_OP              ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
 #define ARGI_SHIFT_RIGHT_OP             ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
 #define ARGI_SIGNAL_OP                  ARGI_LIST1 (ARGI_EVENT)
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 5ea1e06afa2..b725d780d34 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index c445cca490e..bbb34c9be4e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -94,6 +94,14 @@
  * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
  *      (Used for _ART, _FPS)
  *
+ * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
+ *      followed by an optional element
+ *      object type
+ *      count
+ *      object type
+ *      count = 0 (optional)
+ *      (Used for _DLM)
+ *
 *****************************************************************************/

 enum acpi_return_package_types {
@@ -105,7 +113,8 @@
 	ACPI_PTYPE2_PKG_COUNT = 6,
 	ACPI_PTYPE2_FIXED = 7,
 	ACPI_PTYPE2_MIN = 8,
-	ACPI_PTYPE2_REV_FIXED = 9
+	ACPI_PTYPE2_REV_FIXED = 9,
+	ACPI_PTYPE2_FIX_VAR = 10
 };

 #ifdef ACPI_CREATE_PREDEFINED_TABLE
@@ -154,6 +163,7 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_AC8", 0, ACPI_RTYPE_INTEGER}},
 	{{"_AC9", 0, ACPI_RTYPE_INTEGER}},
 	{{"_ADR", 0, ACPI_RTYPE_INTEGER}},
+	{{"_AEI", 0, ACPI_RTYPE_BUFFER}},
 	{{"_AL0", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
 	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},

@@ -229,6 +239,13 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}},	/* Variable-length (Ints/Strs) */
 	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},

+	{{"_CLS", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (3 Int) */
+	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+
+	{{"_CPC", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Ints/Bufs) */
+	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
+	  0}},
+
 	{{"_CRS", 0, ACPI_RTYPE_BUFFER}},
 	{{"_CRT", 0, ACPI_RTYPE_INTEGER}},
 	{{"_CSD", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (1 Int(n), n-1 Int) */
@@ -237,12 +254,21 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_CST", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
 	{{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},

+	{{"_CWS", 1, ACPI_RTYPE_INTEGER}},
 	{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
 	{{"_DCS", 0, ACPI_RTYPE_INTEGER}},
 	{{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
 	{{"_DDN", 0, ACPI_RTYPE_STRING}},
+	{{"_DEP", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
+	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
 	{{"_DGS", 0, ACPI_RTYPE_INTEGER}},
 	{{"_DIS", 0, 0}},
+
+	{{"_DLM", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
+	{{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
+	   ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
+
 	{{"_DMA", 0, ACPI_RTYPE_BUFFER}},
 	{{"_DOD", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Ints) */
 	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
@@ -262,6 +288,7 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_EJ3", 1, 0}},
 	{{"_EJ4", 1, 0}},
 	{{"_EJD", 0, ACPI_RTYPE_STRING}},
+	{{"_EVT", 1, 0}},
 	{{"_FDE", 0, ACPI_RTYPE_BUFFER}},
 	{{"_FDI", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (16 Int) */
 	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
@@ -281,14 +308,17 @@ static const union acpi_predefined_info predefined_names[] =
 	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},

 	{{"_GAI", 0, ACPI_RTYPE_INTEGER}},
+	{{"_GCP", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GHL", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GLK", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GPD", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GPE", 0, ACPI_RTYPE_INTEGER}},	/* _GPE method, not _GPE scope */
+	{{"_GRT", 0, ACPI_RTYPE_BUFFER}},
 	{{"_GSB", 0, ACPI_RTYPE_INTEGER}},
 	{{"_GTF", 0, ACPI_RTYPE_BUFFER}},
 	{{"_GTM", 0, ACPI_RTYPE_BUFFER}},
 	{{"_GTS", 1, 0}},
+	{{"_GWS", 1, ACPI_RTYPE_INTEGER}},
 	{{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
 	{{"_HOT", 0, ACPI_RTYPE_INTEGER}},
 	{{"_HPP", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (4 Int) */
@@ -303,6 +333,7 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_HPX", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each (var Ints) */
 	{{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},

+	{{"_HRV", 0, ACPI_RTYPE_INTEGER}},
 	{{"_IFT", 0, ACPI_RTYPE_INTEGER}},	/* See IPMI spec */
 	{{"_INI", 0, 0}},
 	{{"_IRC", 0, 0}},
@@ -361,6 +392,9 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_PR3", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
 	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},

+	{{"_PRE", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
+	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
 	{{"_PRL", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
 	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},

@@ -391,6 +425,7 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_PSD", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each (5 Int) with count */
 	{{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},

+	{{"_PSE", 1, 0}},
 	{{"_PSL", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
 	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},

@@ -457,6 +492,7 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_SLI", 0, ACPI_RTYPE_BUFFER}},
 	{{"_SPD", 1, ACPI_RTYPE_INTEGER}},
 	{{"_SRS", 1, 0}},
+	{{"_SRT", 1, ACPI_RTYPE_INTEGER}},
 	{{"_SRV", 0, ACPI_RTYPE_INTEGER}},	/* See IPMI spec */
 	{{"_SST", 1, 0}},
 	{{"_STA", 0, ACPI_RTYPE_INTEGER}},
@@ -464,6 +500,7 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_STP", 2, ACPI_RTYPE_INTEGER}},
 	{{"_STR", 0, ACPI_RTYPE_BUFFER}},
 	{{"_STV", 2, ACPI_RTYPE_INTEGER}},
+	{{"_SUB", 0, ACPI_RTYPE_STRING}},
 	{{"_SUN", 0, ACPI_RTYPE_INTEGER}},
 	{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
 	{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f08b55b7f3a..0347d099349 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -73,28 +73,40 @@ typedef const struct acpi_rsconvert_info {
73 73
74/* Resource conversion opcodes */ 74/* Resource conversion opcodes */
75 75
76#define ACPI_RSC_INITGET 0 76typedef enum {
77#define ACPI_RSC_INITSET 1 77 ACPI_RSC_INITGET = 0,
78#define ACPI_RSC_FLAGINIT 2 78 ACPI_RSC_INITSET,
79#define ACPI_RSC_1BITFLAG 3 79 ACPI_RSC_FLAGINIT,
80#define ACPI_RSC_2BITFLAG 4 80 ACPI_RSC_1BITFLAG,
81#define ACPI_RSC_COUNT 5 81 ACPI_RSC_2BITFLAG,
82#define ACPI_RSC_COUNT16 6 82 ACPI_RSC_3BITFLAG,
83#define ACPI_RSC_LENGTH 7 83 ACPI_RSC_ADDRESS,
84#define ACPI_RSC_MOVE8 8 84 ACPI_RSC_BITMASK,
85#define ACPI_RSC_MOVE16 9 85 ACPI_RSC_BITMASK16,
86#define ACPI_RSC_MOVE32 10 86 ACPI_RSC_COUNT,
87#define ACPI_RSC_MOVE64 11 87 ACPI_RSC_COUNT16,
88#define ACPI_RSC_SET8 12 88 ACPI_RSC_COUNT_GPIO_PIN,
89#define ACPI_RSC_DATA8 13 89 ACPI_RSC_COUNT_GPIO_RES,
90#define ACPI_RSC_ADDRESS 14 90 ACPI_RSC_COUNT_GPIO_VEN,
91#define ACPI_RSC_SOURCE 15 91 ACPI_RSC_COUNT_SERIAL_RES,
92#define ACPI_RSC_SOURCEX 16 92 ACPI_RSC_COUNT_SERIAL_VEN,
93#define ACPI_RSC_BITMASK 17 93 ACPI_RSC_DATA8,
94#define ACPI_RSC_BITMASK16 18 94 ACPI_RSC_EXIT_EQ,
95#define ACPI_RSC_EXIT_NE 19 95 ACPI_RSC_EXIT_LE,
96#define ACPI_RSC_EXIT_LE 20 96 ACPI_RSC_EXIT_NE,
97#define ACPI_RSC_EXIT_EQ 21 97 ACPI_RSC_LENGTH,
98 ACPI_RSC_MOVE_GPIO_PIN,
99 ACPI_RSC_MOVE_GPIO_RES,
100 ACPI_RSC_MOVE_SERIAL_RES,
101 ACPI_RSC_MOVE_SERIAL_VEN,
102 ACPI_RSC_MOVE8,
103 ACPI_RSC_MOVE16,
104 ACPI_RSC_MOVE32,
105 ACPI_RSC_MOVE64,
106 ACPI_RSC_SET8,
107 ACPI_RSC_SOURCE,
108 ACPI_RSC_SOURCEX
109} ACPI_RSCONVERT_OPCODES;
98 110
99/* Resource Conversion sub-opcodes */ 111/* Resource Conversion sub-opcodes */
100 112
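
The switch from #define opcodes to the ACPI_RSCONVERT_OPCODES enum matters because these values drive a table interpreter: each acpi_rsconvert_info entry pairs an opcode with source/destination offsets, and one walker executes the entries in order. A minimal standalone sketch of that pattern (not the actual ACPICA walker; names and the opcode subset are illustrative):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Illustrative subset of the conversion opcodes */
enum rsc_opcode { RSC_MOVE8, RSC_MOVE16, RSC_EXIT };

struct rsconvert_info {
	enum rsc_opcode opcode;
	size_t dst_offset;	/* offset into the internal struct */
	size_t src_offset;	/* offset into the raw AML descriptor */
};

/* Execute a conversion table against one raw AML descriptor */
static void rs_convert(void *dst, const uint8_t *aml,
		       const struct rsconvert_info *info)
{
	for (;; info++) {
		switch (info->opcode) {
		case RSC_MOVE8:
			memcpy((uint8_t *)dst + info->dst_offset,
			       aml + info->src_offset, 1);
			break;
		case RSC_MOVE16: {
			uint16_t v;	/* unaligned-safe 16-bit move */
			memcpy(&v, aml + info->src_offset, 2);
			memcpy((uint8_t *)dst + info->dst_offset, &v, 2);
			break;
		}
		case RSC_EXIT:
			return;
		}
	}
}

With an enum, a switch over the opcode can be checked for unhandled cases by the compiler, which is useful now that ACPI 5.0 roughly doubles the opcode list.
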
@@ -106,6 +118,9 @@ typedef const struct acpi_rsconvert_info {
106#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f) 118#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f)
107#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f) 119#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f)
108 120
121/*
122 * Individual entry for the resource dump tables
123 */
109typedef const struct acpi_rsdump_info { 124typedef const struct acpi_rsdump_info {
110 u8 opcode; 125 u8 opcode;
111 u8 offset; 126 u8 offset;
@@ -116,20 +131,25 @@ typedef const struct acpi_rsdump_info {
116 131
117/* Values for the Opcode field above */ 132/* Values for the Opcode field above */
118 133
119#define ACPI_RSD_TITLE 0 134typedef enum {
120#define ACPI_RSD_LITERAL 1 135 ACPI_RSD_TITLE = 0,
121#define ACPI_RSD_STRING 2 136 ACPI_RSD_1BITFLAG,
122#define ACPI_RSD_UINT8 3 137 ACPI_RSD_2BITFLAG,
123#define ACPI_RSD_UINT16 4 138 ACPI_RSD_3BITFLAG,
124#define ACPI_RSD_UINT32 5 139 ACPI_RSD_ADDRESS,
125#define ACPI_RSD_UINT64 6 140 ACPI_RSD_DWORDLIST,
126#define ACPI_RSD_1BITFLAG 7 141 ACPI_RSD_LITERAL,
127#define ACPI_RSD_2BITFLAG 8 142 ACPI_RSD_LONGLIST,
128#define ACPI_RSD_SHORTLIST 9 143 ACPI_RSD_SHORTLIST,
129#define ACPI_RSD_LONGLIST 10 144 ACPI_RSD_SHORTLISTX,
130#define ACPI_RSD_DWORDLIST 11 145 ACPI_RSD_SOURCE,
131#define ACPI_RSD_ADDRESS 12 146 ACPI_RSD_STRING,
132#define ACPI_RSD_SOURCE 13 147 ACPI_RSD_UINT8,
148 ACPI_RSD_UINT16,
149 ACPI_RSD_UINT32,
150 ACPI_RSD_UINT64,
151 ACPI_RSD_WORDLIST
152} ACPI_RSDUMP_OPCODES;
133 153
134/* restore default alignment */ 154/* restore default alignment */
135 155
@@ -138,13 +158,18 @@ typedef const struct acpi_rsdump_info {
138/* Resource tables indexed by internal resource type */ 158/* Resource tables indexed by internal resource type */
139 159
140extern const u8 acpi_gbl_aml_resource_sizes[]; 160extern const u8 acpi_gbl_aml_resource_sizes[];
161extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[];
141extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[]; 162extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
142 163
143/* Resource tables indexed by raw AML resource descriptor type */ 164/* Resource tables indexed by raw AML resource descriptor type */
144 165
145extern const u8 acpi_gbl_resource_struct_sizes[]; 166extern const u8 acpi_gbl_resource_struct_sizes[];
167extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[];
146extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[]; 168extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
147 169
170extern struct acpi_rsconvert_info
171 *acpi_gbl_convert_resource_serial_bus_dispatch[];
172
148struct acpi_vendor_walk_info { 173struct acpi_vendor_walk_info {
149 struct acpi_vendor_uuid *uuid; 174 struct acpi_vendor_uuid *uuid;
150 struct acpi_buffer *buffer; 175 struct acpi_buffer *buffer;
@@ -190,6 +215,10 @@ acpi_status
190acpi_rs_set_srs_method_data(struct acpi_namespace_node *node, 215acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
191 struct acpi_buffer *ret_buffer); 216 struct acpi_buffer *ret_buffer);
192 217
218acpi_status
219acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
220 struct acpi_buffer *ret_buffer);
221
193/* 222/*
194 * rscalc 223 * rscalc
195 */ 224 */
@@ -293,6 +322,11 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address16[];
293extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[]; 322extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
294extern struct acpi_rsconvert_info acpi_rs_convert_address64[]; 323extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
295extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[]; 324extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
325extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
326extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
327extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
328extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
329extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
296 330
297/* These resources require separate get/set tables */ 331/* These resources require separate get/set tables */
298 332
@@ -310,6 +344,7 @@ extern struct acpi_rsconvert_info acpi_rs_set_vendor[];
310 * rsinfo 344 * rsinfo
311 */ 345 */
312extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[]; 346extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
347extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
313 348
314/* 349/*
315 * rsdump 350 * rsdump
@@ -331,6 +366,12 @@ extern struct acpi_rsdump_info acpi_rs_dump_address64[];
331extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[]; 366extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
332extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[]; 367extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
333extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[]; 368extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
369extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
370extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
371extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
372extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
373extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
374extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
334#endif 375#endif
335 376
336#endif /* __ACRESRC_H__ */ 377#endif /* __ACRESRC_H__ */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 1623b245dde..0404df605bc 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 967f08124eb..d5bec304c82 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 99c140d8e34..925ccf22101 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
45#define _ACUTILS_H 45#define _ACUTILS_H
46 46
47extern const u8 acpi_gbl_resource_aml_sizes[]; 47extern const u8 acpi_gbl_resource_aml_sizes[];
48extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
48 49
49/* Strings used by the disassembler and debugger resource dump routines */ 50/* Strings used by the disassembler and debugger resource dump routines */
50 51
@@ -579,6 +580,24 @@ acpi_ut_create_list(char *list_name,
579#endif /* ACPI_DBG_TRACK_ALLOCATIONS */ 580#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
580 581
581/* 582/*
583 * utaddress - address range check
584 */
585acpi_status
586acpi_ut_add_address_range(acpi_adr_space_type space_id,
587 acpi_physical_address address,
588 u32 length, struct acpi_namespace_node *region_node);
589
590void
591acpi_ut_remove_address_range(acpi_adr_space_type space_id,
592 struct acpi_namespace_node *region_node);
593
594u32
595acpi_ut_check_address_range(acpi_adr_space_type space_id,
596 acpi_physical_address address, u32 length, u8 warn);
597
598void acpi_ut_delete_address_lists(void);
599
600/*
582 * utxferror - various error/warning output functions 601 * utxferror - various error/warning output functions
583 */ 602 */
584void ACPI_INTERNAL_VAR_XFACE 603void ACPI_INTERNAL_VAR_XFACE
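
The utaddress interfaces above track the address windows claimed by SystemMemory/SystemIO operation regions so that later requests can be tested for overlap (acpi_ut_check_address_range returns the number of conflicts and can optionally warn). A standalone sketch of the bookkeeping these declarations imply -- a linked list of ranges with an overlap test; ACPICA keeps one list per space ID, and the real struct lives in aclocal.h, not shown here:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t acpi_physical_address;

/* One tracked operation-region window (illustrative) */
struct addr_range {
	struct addr_range *next;
	acpi_physical_address start;
	acpi_physical_address end;
};

static struct addr_range *range_list;	/* per-space_id in ACPICA */

/* Register a region's window; length is assumed non-zero */
static int add_range(acpi_physical_address start, uint32_t length)
{
	struct addr_range *r = malloc(sizeof(*r));
	if (!r)
		return -1;
	r->start = start;
	r->end = start + length - 1;
	r->next = range_list;
	range_list = r;
	return 0;
}

/* Return how many tracked ranges overlap [addr, addr+length) */
static uint32_t check_range(acpi_physical_address addr, uint32_t length,
			    int warn)
{
	acpi_physical_address end = addr + length - 1;
	uint32_t overlaps = 0;
	struct addr_range *r;

	for (r = range_list; r; r = r->next) {
		if (addr <= r->end && end >= r->start) {
			overlaps++;
			if (warn)
				fprintf(stderr,
					"0x%llx-0x%llx conflicts with an AML region\n",
					(unsigned long long)addr,
					(unsigned long long)end);
		}
	}
	return overlaps;
}
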
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1077f17859e..905280fec0f 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2011, Intel Corp. 10 * Copyright (C) 2000 - 2012, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -189,6 +189,14 @@
189#define AML_LNOTEQUAL_OP (u16) 0x9293 189#define AML_LNOTEQUAL_OP (u16) 0x9293
190 190
191/* 191/*
192 * Opcodes for "Field" operators
193 */
194#define AML_FIELD_OFFSET_OP (u8) 0x00
195#define AML_FIELD_ACCESS_OP (u8) 0x01
196#define AML_FIELD_CONNECTION_OP (u8) 0x02 /* ACPI 5.0 */
197#define AML_FIELD_EXT_ACCESS_OP (u8) 0x03 /* ACPI 5.0 */
198
199/*
192 * Internal opcodes 200 * Internal opcodes
193 * Use only "Unknown" AML opcodes, don't attempt to use 201 * Use only "Unknown" AML opcodes, don't attempt to use
194 * any valid ACPI ASCII values (A-Z, 0-9, '-') 202 * any valid ACPI ASCII values (A-Z, 0-9, '-')
@@ -202,6 +210,8 @@
202#define AML_INT_METHODCALL_OP (u16) 0x0035 210#define AML_INT_METHODCALL_OP (u16) 0x0035
203#define AML_INT_RETURN_VALUE_OP (u16) 0x0036 211#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
204#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037 212#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
213#define AML_INT_CONNECTION_OP (u16) 0x0038
214#define AML_INT_EXTACCESSFIELD_OP (u16) 0x0039
205 215
206#define ARG_NONE 0x0 216#define ARG_NONE 0x0
207 217
@@ -456,13 +466,16 @@ typedef enum {
456 * access_as keyword 466 * access_as keyword
457 */ 467 */
458typedef enum { 468typedef enum {
459 AML_FIELD_ATTRIB_SMB_QUICK = 0x02, 469 AML_FIELD_ATTRIB_QUICK = 0x02,
460 AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04, 470 AML_FIELD_ATTRIB_SEND_RCV = 0x04,
461 AML_FIELD_ATTRIB_SMB_BYTE = 0x06, 471 AML_FIELD_ATTRIB_BYTE = 0x06,
462 AML_FIELD_ATTRIB_SMB_WORD = 0x08, 472 AML_FIELD_ATTRIB_WORD = 0x08,
463 AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A, 473 AML_FIELD_ATTRIB_BLOCK = 0x0A,
464 AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C, 474 AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
465 AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D 475 AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
476 AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
477 AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
478 AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
466} AML_ACCESS_ATTRIBUTE; 479} AML_ACCESS_ATTRIBUTE;
467 480
468/* Bit fields in the AML method_flags byte */ 481/* Bit fields in the AML method_flags byte */
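
The attribute renames above (SMB_ prefix dropped) reflect that ACPI 5.0 reuses these AccessAs attributes for GenericSerialBus, not just SMBus. For illustration, a small decoder mapping raw attribute bytes to their ASL keywords (a sketch; the keyword strings follow ASL spelling and are not taken from an ACPICA table):

#include <stdint.h>

static const char *attrib_name(uint8_t attrib)
{
	switch (attrib) {
	case 0x02: return "AttribQuick";
	case 0x04: return "AttribSendReceive";
	case 0x06: return "AttribByte";
	case 0x08: return "AttribWord";
	case 0x0A: return "AttribBlock";
	case 0x0B: return "AttribBytes";		/* new in ACPI 5.0 */
	case 0x0C: return "AttribProcessCall";
	case 0x0D: return "AttribBlockProcessCall";
	case 0x0E: return "AttribRawBytes";		/* new in ACPI 5.0 */
	case 0x0F: return "AttribRawProcessBytes";	/* new in ACPI 5.0 */
	default:   return "<unknown>";
	}
}
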
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 59122cde247..7b2128f274e 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -58,29 +58,48 @@
58#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT" 58#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
59#define ACPI_RESTAG_BASEADDRESS "_BAS" 59#define ACPI_RESTAG_BASEADDRESS "_BAS"
60#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */ 60#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
61#define ACPI_RESTAG_DEBOUNCETIME "_DBT"
61#define ACPI_RESTAG_DECODE "_DEC" 62#define ACPI_RESTAG_DECODE "_DEC"
63#define ACPI_RESTAG_DEVICEPOLARITY "_DPL"
62#define ACPI_RESTAG_DMA "_DMA" 64#define ACPI_RESTAG_DMA "_DMA"
63#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */ 65#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
66#define ACPI_RESTAG_DRIVESTRENGTH "_DRS"
67#define ACPI_RESTAG_ENDIANNESS "_END"
68#define ACPI_RESTAG_FLOWCONTROL "_FLC"
64#define ACPI_RESTAG_GRANULARITY "_GRA" 69#define ACPI_RESTAG_GRANULARITY "_GRA"
65#define ACPI_RESTAG_INTERRUPT "_INT" 70#define ACPI_RESTAG_INTERRUPT "_INT"
66#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */ 71#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
67#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */ 72#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
68#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */ 73#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
74#define ACPI_RESTAG_IORESTRICTION "_IOR"
69#define ACPI_RESTAG_LENGTH "_LEN" 75#define ACPI_RESTAG_LENGTH "_LEN"
76#define ACPI_RESTAG_LINE "_LIN"
70#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */ 77#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
71#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */ 78#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
72#define ACPI_RESTAG_MAXADDR "_MAX" 79#define ACPI_RESTAG_MAXADDR "_MAX"
73#define ACPI_RESTAG_MINADDR "_MIN" 80#define ACPI_RESTAG_MINADDR "_MIN"
74#define ACPI_RESTAG_MAXTYPE "_MAF" 81#define ACPI_RESTAG_MAXTYPE "_MAF"
75#define ACPI_RESTAG_MINTYPE "_MIF" 82#define ACPI_RESTAG_MINTYPE "_MIF"
83#define ACPI_RESTAG_MODE "_MOD"
84#define ACPI_RESTAG_PARITY "_PAR"
85#define ACPI_RESTAG_PHASE "_PHA"
86#define ACPI_RESTAG_PIN "_PIN"
87#define ACPI_RESTAG_PINCONFIG "_PPI"
88#define ACPI_RESTAG_POLARITY "_POL"
76#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO" 89#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
77#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW" 90#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
78#define ACPI_RESTAG_RANGETYPE "_RNG" 91#define ACPI_RESTAG_RANGETYPE "_RNG"
79#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */ 92#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
93#define ACPI_RESTAG_LENGTH_RX "_RXL"
94#define ACPI_RESTAG_LENGTH_TX "_TXL"
95#define ACPI_RESTAG_SLAVEMODE "_SLV"
96#define ACPI_RESTAG_SPEED "_SPE"
97#define ACPI_RESTAG_STOPBITS "_STB"
80#define ACPI_RESTAG_TRANSLATION "_TRA" 98#define ACPI_RESTAG_TRANSLATION "_TRA"
81#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */ 99#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
82#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */ 100#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
83#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */ 101#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
102#define ACPI_RESTAG_VENDORDATA "_VEN"
84 103
85/* Default sizes for "small" resource descriptors */ 104/* Default sizes for "small" resource descriptors */
86 105
@@ -90,6 +109,7 @@
90#define ASL_RDESC_END_DEPEND_SIZE 0x00 109#define ASL_RDESC_END_DEPEND_SIZE 0x00
91#define ASL_RDESC_IO_SIZE 0x07 110#define ASL_RDESC_IO_SIZE 0x07
92#define ASL_RDESC_FIXED_IO_SIZE 0x03 111#define ASL_RDESC_FIXED_IO_SIZE 0x03
112#define ASL_RDESC_FIXED_DMA_SIZE 0x05
93#define ASL_RDESC_END_TAG_SIZE 0x01 113#define ASL_RDESC_END_TAG_SIZE 0x01
94 114
95struct asl_resource_node { 115struct asl_resource_node {
@@ -164,6 +184,12 @@ struct aml_resource_end_tag {
164 AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum; 184 AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
165}; 185};
166 186
187struct aml_resource_fixed_dma {
188 AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines;
189 u16 channels;
190 u8 width;
191};
192
167/* 193/*
168 * LARGE descriptors 194 * LARGE descriptors
169 */ 195 */
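
ASL_RDESC_FIXED_DMA_SIZE (0x05) above is the FixedDMA payload length: two 16-bit fields plus a width byte, matching struct aml_resource_fixed_dma. A compile-time check of that arithmetic, assuming the same byte-packed layout:

#include <stdint.h>

#pragma pack(1)
struct fixed_dma_body {		/* payload after the small-resource tag */
	uint16_t request_lines;
	uint16_t channels;
	uint8_t  width;
};
#pragma pack()

/* Matches ASL_RDESC_FIXED_DMA_SIZE == 0x05 */
_Static_assert(sizeof(struct fixed_dma_body) == 0x05,
	       "FixedDMA payload must be 5 bytes");
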
@@ -263,6 +289,110 @@ struct aml_resource_generic_register {
263 u64 address; 289 u64 address;
264}; 290};
265 291
292/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */
293
294struct aml_resource_gpio {
295 AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
296 u8 connection_type;
297 u16 flags;
298 u16 int_flags;
299 u8 pin_config;
300 u16 drive_strength;
301 u16 debounce_timeout;
302 u16 pin_table_offset;
303 u8 res_source_index;
304 u16 res_source_offset;
305 u16 vendor_offset;
306 u16 vendor_length;
307 /*
308 * Optional fields follow immediately:
309 * 1) PIN list (Words)
310 * 2) Resource Source String
311 * 3) Vendor Data bytes
312 */
313};
314
315#define AML_RESOURCE_GPIO_REVISION 1 /* ACPI 5.0 */
316
317/* Values for connection_type above */
318
319#define AML_RESOURCE_GPIO_TYPE_INT 0
320#define AML_RESOURCE_GPIO_TYPE_IO 1
321#define AML_RESOURCE_MAX_GPIOTYPE 1
322
323/* Common preamble for all serial descriptors (ACPI 5.0) */
324
325#define AML_RESOURCE_SERIAL_COMMON \
326 u8 revision_id; \
327 u8 res_source_index; \
328 u8 type; \
329 u8 flags; \
330 u16 type_specific_flags; \
331 u8 type_revision_id; \
332 u16 type_data_length; \
333
334/* Values for the type field above */
335
336#define AML_RESOURCE_I2C_SERIALBUSTYPE 1
337#define AML_RESOURCE_SPI_SERIALBUSTYPE 2
338#define AML_RESOURCE_UART_SERIALBUSTYPE 3
339#define AML_RESOURCE_MAX_SERIALBUSTYPE 3
340#define AML_RESOURCE_VENDOR_SERIALBUSTYPE 192 /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
341
342struct aml_resource_common_serialbus {
343AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
344
345struct aml_resource_i2c_serialbus {
346 AML_RESOURCE_LARGE_HEADER_COMMON
347 AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
348 u16 slave_address;
349 /*
350 * Optional fields follow immediately:
351 * 1) Vendor Data bytes
352 * 2) Resource Source String
353 */
354};
355
356#define AML_RESOURCE_I2C_REVISION 1 /* ACPI 5.0 */
357#define AML_RESOURCE_I2C_TYPE_REVISION 1 /* ACPI 5.0 */
358#define AML_RESOURCE_I2C_MIN_DATA_LEN 6
359
360struct aml_resource_spi_serialbus {
361 AML_RESOURCE_LARGE_HEADER_COMMON
362 AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
363 u8 data_bit_length;
364 u8 clock_phase;
365 u8 clock_polarity;
366 u16 device_selection;
367 /*
368 * Optional fields follow immediately:
369 * 1) Vendor Data bytes
370 * 2) Resource Source String
371 */
372};
373
374#define AML_RESOURCE_SPI_REVISION 1 /* ACPI 5.0 */
375#define AML_RESOURCE_SPI_TYPE_REVISION 1 /* ACPI 5.0 */
376#define AML_RESOURCE_SPI_MIN_DATA_LEN 9
377
378struct aml_resource_uart_serialbus {
379 AML_RESOURCE_LARGE_HEADER_COMMON
380 AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate;
381 u16 rx_fifo_size;
382 u16 tx_fifo_size;
383 u8 parity;
384 u8 lines_enabled;
385 /*
386 * Optional fields follow immediately:
387 * 1) Vendor Data bytes
388 * 2) Resource Source String
389 */
390};
391
392#define AML_RESOURCE_UART_REVISION 1 /* ACPI 5.0 */
393#define AML_RESOURCE_UART_TYPE_REVISION 1 /* ACPI 5.0 */
394#define AML_RESOURCE_UART_MIN_DATA_LEN 10
395
266/* restore default alignment */ 396/* restore default alignment */
267 397
268#pragma pack() 398#pragma pack()
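
The GPIO descriptor is self-describing: pin_table_offset, res_source_offset, and vendor_offset locate the variable-length sections listed in its comment block, and the pin count falls out of the gap between the pin table and the resource source string. A hedged sketch of reading pins from a raw descriptor (offsets assumed self-relative, per the layout above):

#include <stdint.h>

/* Read a 16-bit little-endian field from a raw AML buffer */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

/* The PIN table is a word array that runs up to the start of the
 * resource source string, so the count is implied by the offsets */
static uint16_t gpio_pin_count(uint16_t pin_table_offset,
			       uint16_t res_source_offset)
{
	return (uint16_t)((res_source_offset - pin_table_offset) / 2);
}

/* 'desc' points at the start of a raw GpioInt/GpioIo descriptor */
static uint16_t gpio_pin(const uint8_t *desc, uint16_t pin_table_offset,
			 uint16_t index)
{
	return get_le16(desc + pin_table_offset + 2u * index);
}
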
@@ -284,6 +414,7 @@ union aml_resource {
284 struct aml_resource_end_dependent end_dpf; 414 struct aml_resource_end_dependent end_dpf;
285 struct aml_resource_io io; 415 struct aml_resource_io io;
286 struct aml_resource_fixed_io fixed_io; 416 struct aml_resource_fixed_io fixed_io;
417 struct aml_resource_fixed_dma fixed_dma;
287 struct aml_resource_vendor_small vendor_small; 418 struct aml_resource_vendor_small vendor_small;
288 struct aml_resource_end_tag end_tag; 419 struct aml_resource_end_tag end_tag;
289 420
@@ -299,6 +430,11 @@ union aml_resource {
299 struct aml_resource_address64 address64; 430 struct aml_resource_address64 address64;
300 struct aml_resource_extended_address64 ext_address64; 431 struct aml_resource_extended_address64 ext_address64;
301 struct aml_resource_extended_irq extended_irq; 432 struct aml_resource_extended_irq extended_irq;
433 struct aml_resource_gpio gpio;
434 struct aml_resource_i2c_serialbus i2c_serial_bus;
435 struct aml_resource_spi_serialbus spi_serial_bus;
436 struct aml_resource_uart_serialbus uart_serial_bus;
437 struct aml_resource_common_serialbus common_serial_bus;
302 438
303 /* Utility overlays */ 439 /* Utility overlays */
304 440
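
Because all three serial descriptors start with AML_RESOURCE_SERIAL_COMMON, a parser can validate the shared preamble before choosing the subtype overlay from the union; the *_MIN_DATA_LEN constants bound type_data_length. A sketch of that first step, using the constants defined above:

#include <stdint.h>

#define I2C_TYPE  1	/* AML_RESOURCE_I2C_SERIALBUSTYPE */
#define SPI_TYPE  2	/* AML_RESOURCE_SPI_SERIALBUSTYPE */
#define UART_TYPE 3	/* AML_RESOURCE_UART_SERIALBUSTYPE */

/* Minimum type_data_length per subtype, per the header above */
static const uint8_t min_data_len[] = { 0, 6, 9, 10 };

/* Validate the common preamble of a serial bus descriptor.
 * Returns the subtype, or 0 if malformed or vendor-defined. */
static uint8_t serial_bus_subtype(uint8_t type, uint16_t type_data_length)
{
	if (type < I2C_TYPE || type > UART_TYPE)
		return 0;	/* vendor types (0xC0-0xFF) unsupported */
	if (type_data_length < min_data_len[type])
		return 0;	/* truncated descriptor */
	return type;
}
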
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 8c7b99728aa..80eb1900297 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -250,6 +250,13 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
250 status = acpi_ds_execute_arguments(node, node->parent, 250 status = acpi_ds_execute_arguments(node, node->parent,
251 extra_desc->extra.aml_length, 251 extra_desc->extra.aml_length,
252 extra_desc->extra.aml_start); 252 extra_desc->extra.aml_start);
253 if (ACPI_FAILURE(status)) {
254 return_ACPI_STATUS(status);
255 }
256
257 status = acpi_ut_add_address_range(obj_desc->region.space_id,
258 obj_desc->region.address,
259 obj_desc->region.length, node);
253 return_ACPI_STATUS(status); 260 return_ACPI_STATUS(status);
254} 261}
255 262
@@ -384,8 +391,15 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
384 391
385 /* Execute the argument AML */ 392 /* Execute the argument AML */
386 393
387 status = acpi_ds_execute_arguments(node, node->parent, 394 status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
388 extra_desc->extra.aml_length, 395 extra_desc->extra.aml_length,
389 extra_desc->extra.aml_start); 396 extra_desc->extra.aml_start);
397 if (ACPI_FAILURE(status)) {
398 return_ACPI_STATUS(status);
399 }
400
401 status = acpi_ut_add_address_range(obj_desc->region.space_id,
402 obj_desc->region.address,
403 obj_desc->region.length, node);
390 return_ACPI_STATUS(status); 404 return_ACPI_STATUS(status);
391} 405}
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 26c49fff58d..effe4ca1133 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 34be60c0e44..cd243cf2cab 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
221{ 221{
222 acpi_status status; 222 acpi_status status;
223 u64 position; 223 u64 position;
224 union acpi_parse_object *child;
224 225
225 ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info); 226 ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
226 227
@@ -232,10 +233,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
232 233
233 while (arg) { 234 while (arg) {
234 /* 235 /*
235 * Three types of field elements are handled: 236 * Four types of field elements are handled:
236 * 1) Offset - specifies a bit offset 237 * 1) Name - Enters a new named field into the namespace
237 * 2) access_as - changes the access mode 238 * 2) Offset - specifies a bit offset
238 * 3) Name - Enters a new named field into the namespace 239 * 3) access_as - changes the access mode/attributes
 240 * 4) Connection - Associates a resource template with the field
239 */ 241 */
240 switch (arg->common.aml_opcode) { 242 switch (arg->common.aml_opcode) {
241 case AML_INT_RESERVEDFIELD_OP: 243 case AML_INT_RESERVEDFIELD_OP:
@@ -253,21 +255,70 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
253 break; 255 break;
254 256
255 case AML_INT_ACCESSFIELD_OP: 257 case AML_INT_ACCESSFIELD_OP:
256 258 case AML_INT_EXTACCESSFIELD_OP:
257 /* 259 /*
258 * Get a new access_type and access_attribute -- to be used for all 260 * Get new access_type, access_attribute, and access_length fields
259 * field units that follow, until field end or another access_as 261 * -- to be used for all field units that follow, until the
260 * keyword. 262 * end-of-field or another access_as keyword is encountered.
263 * NOTE. These three bytes are encoded in the integer value
264 * of the parseop for convenience.
261 * 265 *
262 * In field_flags, preserve the flag bits other than the 266 * In field_flags, preserve the flag bits other than the
263 * ACCESS_TYPE bits 267 * ACCESS_TYPE bits.
264 */ 268 */
269
270 /* access_type (byte_acc, word_acc, etc.) */
271
265 info->field_flags = (u8) 272 info->field_flags = (u8)
266 ((info-> 273 ((info->
267 field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) | 274 field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
268 ((u8) ((u32) arg->common.value.integer >> 8))); 275 ((u8)((u32)(arg->common.value.integer & 0x07))));
276
277 /* access_attribute (attrib_quick, attrib_byte, etc.) */
278
279 info->attribute =
280 (u8)((arg->common.value.integer >> 8) & 0xFF);
281
282 /* access_length (for serial/buffer protocols) */
283
284 info->access_length =
285 (u8)((arg->common.value.integer >> 16) & 0xFF);
286 break;
287
288 case AML_INT_CONNECTION_OP:
289 /*
290 * Clear any previous connection. New connection is used for all
291 * fields that follow, similar to access_as
292 */
293 info->resource_buffer = NULL;
294 info->connection_node = NULL;
269 295
270 info->attribute = (u8) (arg->common.value.integer); 296 /*
297 * A Connection() is either an actual resource descriptor (buffer)
298 * or a named reference to a resource template
299 */
300 child = arg->common.value.arg;
301 if (child->common.aml_opcode == AML_INT_BYTELIST_OP) {
302 info->resource_buffer = child->named.data;
303 info->resource_length =
304 (u16)child->named.value.integer;
305 } else {
306 /* Lookup the Connection() namepath, it should already exist */
307
308 status = acpi_ns_lookup(walk_state->scope_info,
309 child->common.value.
310 name, ACPI_TYPE_ANY,
311 ACPI_IMODE_EXECUTE,
312 ACPI_NS_DONT_OPEN_SCOPE,
313 walk_state,
314 &info->connection_node);
315 if (ACPI_FAILURE(status)) {
316 ACPI_ERROR_NAMESPACE(child->common.
317 value.name,
318 status);
319 return_ACPI_STATUS(status);
320 }
321 }
271 break; 322 break;
272 323
273 case AML_INT_NAMEDFIELD_OP: 324 case AML_INT_NAMEDFIELD_OP:
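
Per the NOTE above, the parser packs the AccessAs/ExtAccessAs bytes into a single parseop integer; the masks and shifts in this function recover them. A standalone round-trip of that encoding (layout taken directly from the unpacking code above):

#include <assert.h>
#include <stdint.h>

/* Pack access_type, access_attribute, and access_length the way the
 * field-list parser stores them in the parseop integer */
static uint32_t pack_access(uint8_t type, uint8_t attrib, uint8_t length)
{
	return (uint32_t)(type & 0x07) | ((uint32_t)attrib << 8) |
	       ((uint32_t)length << 16);
}

int main(void)
{
	uint32_t v = pack_access(0x02 /* word_acc */,
				 0x0B /* AttribBytes */,
				 16   /* access_length */);

	/* The same unpacking as acpi_ds_get_field_names above */
	assert((uint8_t)(v & 0x07) == 0x02);		/* access_type */
	assert((uint8_t)((v >> 8) & 0xFF) == 0x0B);	/* attribute */
	assert((uint8_t)((v >> 16) & 0xFF) == 16);	/* access_length */
	return 0;
}
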
@@ -374,6 +425,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
374 } 425 }
375 } 426 }
376 427
428 ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
429
377 /* Second arg is the field flags */ 430 /* Second arg is the field flags */
378 431
379 arg = arg->common.next; 432 arg = arg->common.next;
@@ -386,7 +439,6 @@ acpi_ds_create_field(union acpi_parse_object *op,
386 info.region_node = region_node; 439 info.region_node = region_node;
387 440
388 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); 441 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
389
390 return_ACPI_STATUS(status); 442 return_ACPI_STATUS(status);
391} 443}
392 444
@@ -474,8 +526,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
474 */ 526 */
475 while (arg) { 527 while (arg) {
476 /* 528 /*
477 * Ignore OFFSET and ACCESSAS terms here; we are only interested in the 529 * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested
478 * field names in order to enter them into the namespace. 530 * in the field names in order to enter them into the namespace.
479 */ 531 */
480 if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) { 532 if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
481 status = acpi_ns_lookup(walk_state->scope_info, 533 status = acpi_ns_lookup(walk_state->scope_info,
@@ -651,6 +703,5 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
651 info.region_node = region_node; 703 info.region_node = region_node;
652 704
653 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); 705 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
654
655 return_ACPI_STATUS(status); 706 return_ACPI_STATUS(status);
656} 707}
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a7718bf2b9a..9e5ac7f780a 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 5d797751e20..00f5dab5bcc 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 905ce29a92e..b40bd507be5 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index f42e17e5c25..d7045ca3e32 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index c627a288e02..e5eff758510 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 2c477ce172f..1abcda31037 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index fe40e4c6554..642f3c053e8 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 324acec1179..552aa3a50c8 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 976318138c5..ae714772476 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 76a661fc1e0..9e9490a9cbf 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index a6c374ef991..c9c2ac13e7c 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d458b041e65..6729ebe2f1e 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,12 @@ acpi_status acpi_ev_initialize_events(void)
71 71
72 ACPI_FUNCTION_TRACE(ev_initialize_events); 72 ACPI_FUNCTION_TRACE(ev_initialize_events);
73 73
74 /* If Hardware Reduced flag is set, there are no fixed events */
75
76 if (acpi_gbl_reduced_hardware) {
77 return_ACPI_STATUS(AE_OK);
78 }
79
74 /* 80 /*
75 * Initialize the Fixed and General Purpose Events. This is done prior to 81 * Initialize the Fixed and General Purpose Events. This is done prior to
76 * enabling SCIs to prevent interrupts from occurring before the handlers 82 * enabling SCIs to prevent interrupts from occurring before the handlers
@@ -111,6 +117,12 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
111 117
112 ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers); 118 ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
113 119
120 /* If Hardware Reduced flag is set, there is no ACPI h/w */
121
122 if (acpi_gbl_reduced_hardware) {
123 return_ACPI_STATUS(AE_OK);
124 }
125
114 /* Install the SCI handler */ 126 /* Install the SCI handler */
115 127
116 status = acpi_ev_install_sci_handler(); 128 status = acpi_ev_install_sci_handler();
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 56a562a1e5d..5e5683cb1f0 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,12 @@ acpi_status acpi_ev_init_global_lock_handler(void)
70 70
71 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); 71 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
72 72
73 /* If Hardware Reduced flag is set, there is no global lock */
74
75 if (acpi_gbl_reduced_hardware) {
76 return_ACPI_STATUS(AE_OK);
77 }
78
73 /* Attempt installation of the global lock handler */ 79 /* Attempt installation of the global lock handler */
74 80
75 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, 81 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 65c79add3b1..9e88cb6fb25 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index ca2c41a5331..be75339cd5d 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index ce9aa9f9a97..adf7494da9d 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 80a81d0c4a8..25073932aa1 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index d0b33184442..84966f41646 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index f0edf5c43c0..1b0180a1b79 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -329,6 +329,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
329 * FUNCTION: acpi_ev_address_space_dispatch 329 * FUNCTION: acpi_ev_address_space_dispatch
330 * 330 *
331 * PARAMETERS: region_obj - Internal region object 331 * PARAMETERS: region_obj - Internal region object
332 * field_obj - Corresponding field. Can be NULL.
332 * Function - Read or Write operation 333 * Function - Read or Write operation
333 * region_offset - Where in the region to read or write 334 * region_offset - Where in the region to read or write
334 * bit_width - Field width in bits (8, 16, 32, or 64) 335 * bit_width - Field width in bits (8, 16, 32, or 64)
@@ -344,6 +345,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
344 345
345acpi_status 346acpi_status
346acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, 347acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
348 union acpi_operand_object *field_obj,
347 u32 function, 349 u32 function,
348 u32 region_offset, u32 bit_width, u64 *value) 350 u32 region_offset, u32 bit_width, u64 *value)
349{ 351{
@@ -353,6 +355,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
353 union acpi_operand_object *handler_desc; 355 union acpi_operand_object *handler_desc;
354 union acpi_operand_object *region_obj2; 356 union acpi_operand_object *region_obj2;
355 void *region_context = NULL; 357 void *region_context = NULL;
358 struct acpi_connection_info *context;
356 359
357 ACPI_FUNCTION_TRACE(ev_address_space_dispatch); 360 ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
358 361
@@ -375,6 +378,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
375 return_ACPI_STATUS(AE_NOT_EXIST); 378 return_ACPI_STATUS(AE_NOT_EXIST);
376 } 379 }
377 380
381 context = handler_desc->address_space.context;
382
378 /* 383 /*
379 * It may be the case that the region has never been initialized. 384 * It may be the case that the region has never been initialized.
380 * Some types of regions require special init code 385 * Some types of regions require special init code
@@ -404,8 +409,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
404 acpi_ex_exit_interpreter(); 409 acpi_ex_exit_interpreter();
405 410
406 status = region_setup(region_obj, ACPI_REGION_ACTIVATE, 411 status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
407 handler_desc->address_space.context, 412 context, &region_context);
408 &region_context);
409 413
410 /* Re-enter the interpreter */ 414 /* Re-enter the interpreter */
411 415
@@ -455,6 +459,25 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
455 acpi_ut_get_region_name(region_obj->region. 459 acpi_ut_get_region_name(region_obj->region.
456 space_id))); 460 space_id)));
457 461
462 /*
463 * Special handling for generic_serial_bus and general_purpose_io:
464 * There are three extra parameters that must be passed to the
465 * handler via the context:
466 * 1) Connection buffer, a resource template from Connection() op.
467 * 2) Length of the above buffer.
468 * 3) Actual access length from the access_as() op.
469 */
470 if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
471 (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
472 context && field_obj) {
473
474 /* Get the Connection (resource_template) buffer */
475
476 context->connection = field_obj->field.resource_buffer;
477 context->length = field_obj->field.resource_length;
478 context->access_length = field_obj->field.access_length;
479 }
480
458 if (!(handler_desc->address_space.handler_flags & 481 if (!(handler_desc->address_space.handler_flags &
459 ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { 482 ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
460 /* 483 /*
@@ -469,7 +492,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
469 492
470 status = handler(function, 493 status = handler(function,
471 (region_obj->region.address + region_offset), 494 (region_obj->region.address + region_offset),
472 bit_width, value, handler_desc->address_space.context, 495 bit_width, value, context,
473 region_obj2->extra.region_context); 496 region_obj2->extra.region_context);
474 497
475 if (ACPI_FAILURE(status)) { 498 if (ACPI_FAILURE(status)) {
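
From the handler's side, the connection data arrives through the ordinary context pointer. A hedged sketch of what a GenericSerialBus handler might do with it -- the struct and helper names here are illustrative stand-ins, not the kernel's API:

#include <stdint.h>

/* Hypothetical transport helper -- stands in for whatever the host
 * controller driver does with the connection descriptor */
static int do_serial_transfer(const uint8_t *tmpl, uint16_t tmpl_len,
			      uint8_t access_length)
{
	(void)tmpl; (void)tmpl_len; (void)access_length;
	return 0;	/* pretend success */
}

/* Mirrors the three fields stored into the handler context above
 * (a stand-in for struct acpi_connection_info) */
struct connection_info {
	uint8_t *connection;	/* Connection() resource template */
	uint16_t length;	/* length of that buffer */
	uint8_t  access_length;	/* from AccessAs/ExtAccessAs */
};

/* Sketch of a GenericSerialBus address-space handler; 'context' is
 * the same pointer the dispatcher passes to handler() above */
static int gsbus_handler(uint32_t function, uint64_t address,
			 uint32_t bit_width, uint64_t *value,
			 void *context, void *region_context)
{
	struct connection_info *info = context;

	(void)function; (void)address; (void)bit_width;
	(void)value; (void)region_context;

	if (!info || !info->connection)
		return -1;	/* field had no Connection() attached */

	return do_serial_transfer(info->connection, info->length,
				  info->access_length);
}
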
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 55a5d35ef34..819c17f5897 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 2ebd40e1a3e..26065c612e7 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index f4f523bf593..61944e89565 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 20516e59947..1768bbec100 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index f06a3ee356b..33388fd69df 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index aee887e3ca5..6019208cd4b 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 745a42b401f..c86d44e41bc 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
297 /* Bytewise reads */ 297 /* Bytewise reads */
298 298
299 for (i = 0; i < length; i++) { 299 for (i = 0; i < length; i++) {
300 status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ, 300 status =
301 region_offset, 8, 301 acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
302 &value); 302 region_offset, 8, &value);
303 if (ACPI_FAILURE(status)) { 303 if (ACPI_FAILURE(status)) {
304 return status; 304 return status;
305 } 305 }
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 74162a11817..e385436bd42 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 110711afada..3f5bc998c1c 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -267,7 +267,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
267 * 267 *
268 * PARAMETERS: aml_start - Pointer to the region declaration AML 268 * PARAMETERS: aml_start - Pointer to the region declaration AML
269 * aml_length - Max length of the declaration AML 269 * aml_length - Max length of the declaration AML
270 * region_space - space_iD for the region 270 * space_id - Address space ID for the region
271 * walk_state - Current state 271 * walk_state - Current state
272 * 272 *
273 * RETURN: Status 273 * RETURN: Status
@@ -279,7 +279,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
279acpi_status 279acpi_status
280acpi_ex_create_region(u8 * aml_start, 280acpi_ex_create_region(u8 * aml_start,
281 u32 aml_length, 281 u32 aml_length,
282 u8 region_space, struct acpi_walk_state *walk_state) 282 u8 space_id, struct acpi_walk_state *walk_state)
283{ 283{
284 acpi_status status; 284 acpi_status status;
285 union acpi_operand_object *obj_desc; 285 union acpi_operand_object *obj_desc;
@@ -304,16 +304,19 @@ acpi_ex_create_region(u8 * aml_start,
304 * Space ID must be one of the predefined IDs, or in the user-defined 304 * Space ID must be one of the predefined IDs, or in the user-defined
305 * range 305 * range
306 */ 306 */
307 if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) && 307 if (!acpi_is_valid_space_id(space_id)) {
308 (region_space < ACPI_USER_REGION_BEGIN) && 308 /*
309 (region_space != ACPI_ADR_SPACE_DATA_TABLE)) { 309 * Print an error message, but continue. We don't want to abort
310 ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X", 310 * a table load for this exception. Instead, if the region is
311 region_space)); 311 * actually used at runtime, abort the executing method.
312 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); 312 */
313 ACPI_ERROR((AE_INFO,
314 "Invalid/unknown Address Space ID: 0x%2.2X",
315 space_id));
313 } 316 }
314 317
315 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n", 318 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
316 acpi_ut_get_region_name(region_space), region_space)); 319 acpi_ut_get_region_name(space_id), space_id));
317 320
318 /* Create the region descriptor */ 321 /* Create the region descriptor */
319 322
@@ -330,10 +333,16 @@ acpi_ex_create_region(u8 * aml_start,
330 region_obj2 = obj_desc->common.next_object; 333 region_obj2 = obj_desc->common.next_object;
331 region_obj2->extra.aml_start = aml_start; 334 region_obj2->extra.aml_start = aml_start;
332 region_obj2->extra.aml_length = aml_length; 335 region_obj2->extra.aml_length = aml_length;
336 if (walk_state->scope_info) {
337 region_obj2->extra.scope_node =
338 walk_state->scope_info->scope.node;
339 } else {
340 region_obj2->extra.scope_node = node;
341 }
333 342
334 /* Init the region from the operands */ 343 /* Init the region from the operands */
335 344
336 obj_desc->region.space_id = region_space; 345 obj_desc->region.space_id = space_id;
337 obj_desc->region.address = 0; 346 obj_desc->region.address = 0;
338 obj_desc->region.length = 0; 347 obj_desc->region.length = 0;
339 obj_desc->region.node = node; 348 obj_desc->region.node = node;
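
Two behavioral changes land in acpi_ex_create_region() above. First, an invalid space ID now only prints an error at table-load time; the hard AE_AML_INVALID_SPACE_ID failure is deferred to the point where the region is actually used (see the exfldio.c hunk below). Second, the scope in effect at the declaration is recorded, so deferred evaluation of the region's address and length operands can resolve names relative to the declaring scope. The capture logic, condensed:

    /* Sketch: record the declaring scope for deferred region setup */
    region_obj2->extra.scope_node = walk_state->scope_info
        ? walk_state->scope_info->scope.node  /* current Scope() */
        : node;                               /* fall back to the region's own node */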
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index c7a2f1edd28..e211e9c1921 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 61b8c0e8b74..2a6ac0a3bc1 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -192,10 +192,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
192 "Buffer Object"} 192 "Buffer Object"}
193}; 193};
194 194
195static struct acpi_exdump_info acpi_ex_dump_region_field[3] = { 195static struct acpi_exdump_info acpi_ex_dump_region_field[5] = {
196 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL}, 196 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
197 {ACPI_EXD_FIELD, 0, NULL}, 197 {ACPI_EXD_FIELD, 0, NULL},
198 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"} 198 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"},
199 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"},
200 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer),
201 "ResourceBuffer"}
199}; 202};
200 203
201static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = { 204static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 0bde2230c02..dc092f5b35d 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -100,18 +100,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
100 (obj_desc->field.region_obj->region.space_id == 100 (obj_desc->field.region_obj->region.space_id ==
101 ACPI_ADR_SPACE_SMBUS 101 ACPI_ADR_SPACE_SMBUS
102 || obj_desc->field.region_obj->region.space_id == 102 || obj_desc->field.region_obj->region.space_id ==
103 ACPI_ADR_SPACE_GSBUS
104 || obj_desc->field.region_obj->region.space_id ==
103 ACPI_ADR_SPACE_IPMI)) { 105 ACPI_ADR_SPACE_IPMI)) {
104 /* 106 /*
105 * This is an SMBus or IPMI read. We must create a buffer to hold 107 * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
106 * the data and then directly access the region handler. 108 * the data and then directly access the region handler.
107 * 109 *
108 * Note: Smbus protocol value is passed in upper 16-bits of Function 110 * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
109 */ 111 */
110 if (obj_desc->field.region_obj->region.space_id == 112 if (obj_desc->field.region_obj->region.space_id ==
111 ACPI_ADR_SPACE_SMBUS) { 113 ACPI_ADR_SPACE_SMBUS) {
112 length = ACPI_SMBUS_BUFFER_SIZE; 114 length = ACPI_SMBUS_BUFFER_SIZE;
113 function = 115 function =
114 ACPI_READ | (obj_desc->field.attribute << 16); 116 ACPI_READ | (obj_desc->field.attribute << 16);
117 } else if (obj_desc->field.region_obj->region.space_id ==
118 ACPI_ADR_SPACE_GSBUS) {
119 length = ACPI_GSBUS_BUFFER_SIZE;
120 function =
121 ACPI_READ | (obj_desc->field.attribute << 16);
115 } else { /* IPMI */ 122 } else { /* IPMI */
116 123
117 length = ACPI_IPMI_BUFFER_SIZE; 124 length = ACPI_IPMI_BUFFER_SIZE;
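
For SMBus, GSBus and IPMI regions the normal field read machinery is bypassed: a transfer buffer of the space-specific size is allocated and handed straight to the region handler, with the field's access attribute (the protocol) packed into the upper 16 bits of the Function code. Roughly:

    /* Sketch of the Function encoding for buffer-protocol spaces */
    u32 function = ACPI_READ | ((u32)obj_desc->field.attribute << 16);
    /* length: ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE or
       ACPI_IPMI_BUFFER_SIZE, chosen by region.space_id */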
@@ -248,21 +255,23 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
248 (obj_desc->field.region_obj->region.space_id == 255 (obj_desc->field.region_obj->region.space_id ==
249 ACPI_ADR_SPACE_SMBUS 256 ACPI_ADR_SPACE_SMBUS
250 || obj_desc->field.region_obj->region.space_id == 257 || obj_desc->field.region_obj->region.space_id ==
258 ACPI_ADR_SPACE_GSBUS
259 || obj_desc->field.region_obj->region.space_id ==
251 ACPI_ADR_SPACE_IPMI)) { 260 ACPI_ADR_SPACE_IPMI)) {
252 /* 261 /*
253 * This is an SMBus or IPMI write. We will bypass the entire field 262 * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
254 * mechanism and handoff the buffer directly to the handler. For 263 * mechanism and handoff the buffer directly to the handler. For
255 * these address spaces, the buffer is bi-directional; on a write, 264 * these address spaces, the buffer is bi-directional; on a write,
256 * return data is returned in the same buffer. 265 * return data is returned in the same buffer.
257 * 266 *
258 * Source must be a buffer of sufficient size: 267 * Source must be a buffer of sufficient size:
259 * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE. 268 * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
260 * 269 *
261 * Note: SMBus protocol type is passed in upper 16-bits of Function 270 * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
262 */ 271 */
263 if (source_desc->common.type != ACPI_TYPE_BUFFER) { 272 if (source_desc->common.type != ACPI_TYPE_BUFFER) {
264 ACPI_ERROR((AE_INFO, 273 ACPI_ERROR((AE_INFO,
265 "SMBus or IPMI write requires Buffer, found type %s", 274 "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
266 acpi_ut_get_object_type_name(source_desc))); 275 acpi_ut_get_object_type_name(source_desc)));
267 276
268 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 277 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -273,6 +282,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
273 length = ACPI_SMBUS_BUFFER_SIZE; 282 length = ACPI_SMBUS_BUFFER_SIZE;
274 function = 283 function =
275 ACPI_WRITE | (obj_desc->field.attribute << 16); 284 ACPI_WRITE | (obj_desc->field.attribute << 16);
285 } else if (obj_desc->field.region_obj->region.space_id ==
286 ACPI_ADR_SPACE_GSBUS) {
287 length = ACPI_GSBUS_BUFFER_SIZE;
288 function =
289 ACPI_WRITE | (obj_desc->field.attribute << 16);
276 } else { /* IPMI */ 290 } else { /* IPMI */
277 291
278 length = ACPI_IPMI_BUFFER_SIZE; 292 length = ACPI_IPMI_BUFFER_SIZE;
@@ -281,7 +295,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
281 295
282 if (source_desc->buffer.length < length) { 296 if (source_desc->buffer.length < length) {
283 ACPI_ERROR((AE_INFO, 297 ACPI_ERROR((AE_INFO,
284 "SMBus or IPMI write requires Buffer of length %u, found length %u", 298 "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
285 length, source_desc->buffer.length)); 299 length, source_desc->buffer.length));
286 300
287 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT); 301 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
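
The write path mirrors the read path, with one extra constraint visible above: because these transfer buffers are bidirectional (return data comes back in the same buffer on a write), the source operand must already be a Buffer at least as large as the space's fixed transfer size. Condensed:

    /* Sketch of the operand checks for SMBus/GSBus/IPMI writes */
    if (source_desc->common.type != ACPI_TYPE_BUFFER)
        return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
    if (source_desc->buffer.length < length)  /* length per space_id, as above */
        return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);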
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f915a7f3f92..149de45fdad 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
86{ 86{
87 acpi_status status = AE_OK; 87 acpi_status status = AE_OK;
88 union acpi_operand_object *rgn_desc; 88 union acpi_operand_object *rgn_desc;
89 u8 space_id;
89 90
90 ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset); 91 ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
91 92
@@ -101,6 +102,17 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
101 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 102 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
102 } 103 }
103 104
105 space_id = rgn_desc->region.space_id;
106
107 /* Validate the Space ID */
108
109 if (!acpi_is_valid_space_id(space_id)) {
110 ACPI_ERROR((AE_INFO,
111 "Invalid/unknown Address Space ID: 0x%2.2X",
112 space_id));
113 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
114 }
115
104 /* 116 /*
105 * If the Region Address and Length have not been previously evaluated, 117 * If the Region Address and Length have not been previously evaluated,
106 * evaluate them now and save the results. 118 * evaluate them now and save the results.
@@ -119,11 +131,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
119 } 131 }
120 132
121 /* 133 /*
122 * Exit now for SMBus or IPMI address space, it has a non-linear 134 * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
123 * address space and the request cannot be directly validated 135 * address space and the request cannot be directly validated
124 */ 136 */
125 if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS || 137 if (space_id == ACPI_ADR_SPACE_SMBUS ||
126 rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) { 138 space_id == ACPI_ADR_SPACE_GSBUS ||
139 space_id == ACPI_ADR_SPACE_IPMI) {
127 140
128 /* SMBus or IPMI has a non-linear address space */ 141 /* SMBus or IPMI has a non-linear address space */
129 142
@@ -271,11 +284,12 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
271 284
272 /* Invoke the appropriate address_space/op_region handler */ 285 /* Invoke the appropriate address_space/op_region handler */
273 286
274 status = 287 status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc,
275 acpi_ev_address_space_dispatch(rgn_desc, function, region_offset, 288 function, region_offset,
276 ACPI_MUL_8(obj_desc->common_field. 289 ACPI_MUL_8(obj_desc->
277 access_byte_width), 290 common_field.
278 value); 291 access_byte_width),
292 value);
279 293
280 if (ACPI_FAILURE(status)) { 294 if (ACPI_FAILURE(status)) {
281 if (status == AE_NOT_IMPLEMENTED) { 295 if (status == AE_NOT_IMPLEMENTED) {
@@ -316,6 +330,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
316static u8 330static u8
317acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) 331acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
318{ 332{
333 ACPI_FUNCTION_NAME(ex_register_overflow);
319 334
320 if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) { 335 if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
321 /* 336 /*
@@ -330,6 +345,11 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
330 * The Value is larger than the maximum value that can fit into 345 * The Value is larger than the maximum value that can fit into
331 * the register. 346 * the register.
332 */ 347 */
348 ACPI_ERROR((AE_INFO,
349 "Index value 0x%8.8X%8.8X overflows field width 0x%X",
350 ACPI_FORMAT_UINT64(value),
351 obj_desc->common_field.bit_length));
352
333 return (TRUE); 353 return (TRUE);
334 } 354 }
335 355
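
acpi_ex_register_overflow() used to reject an oversized Index/Bank value silently; it now names the value and the field width in the log. The predicate itself is unchanged; a condensed model of the check (the shift form is a paraphrase of the ACPICA logic, not a quote):

    /* Sketch: does Value fit in a field of bit_length bits? */
    if (bit_length >= 64)
        return FALSE;                   /* a 64-bit field holds anything */
    if (value >= ((u64)1 << bit_length))
        return TRUE;                    /* overflow: report and reject */
    return FALSE;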
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 703d88ed0b3..0a089331034 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index be1c56ead65..60933e9dc3c 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 49ec049c157..fcc75fa27d3 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 236ead14b7f..9ba8c73cea1 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 2571b4a310f..879e8a277b9 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 1b48d9d28c9..71fcc65c9ff 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f4a2787e8e9..0786b865906 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index cc95e200040..30157f5a12d 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
47#include "acinterp.h" 47#include "acinterp.h"
48#include "amlcode.h" 48#include "amlcode.h"
49#include "acnamesp.h" 49#include "acnamesp.h"
50#include "acdispat.h"
50 51
51#define _COMPONENT ACPI_EXECUTER 52#define _COMPONENT ACPI_EXECUTER
52ACPI_MODULE_NAME("exprep") 53ACPI_MODULE_NAME("exprep")
@@ -455,6 +456,30 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
455 obj_desc->field.region_obj = 456 obj_desc->field.region_obj =
456 acpi_ns_get_attached_object(info->region_node); 457 acpi_ns_get_attached_object(info->region_node);
457 458
459 /* Fields specific to generic_serial_bus fields */
460
461 obj_desc->field.access_length = info->access_length;
462
463 if (info->connection_node) {
464 second_desc = info->connection_node->object;
465 if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
466 status =
467 acpi_ds_get_buffer_arguments(second_desc);
468 if (ACPI_FAILURE(status)) {
469 acpi_ut_delete_object_desc(obj_desc);
470 return_ACPI_STATUS(status);
471 }
472 }
473
474 obj_desc->field.resource_buffer =
475 second_desc->buffer.pointer;
476 obj_desc->field.resource_length =
477 (u16)second_desc->buffer.length;
478 } else if (info->resource_buffer) {
479 obj_desc->field.resource_buffer = info->resource_buffer;
480 obj_desc->field.resource_length = info->resource_length;
481 }
482
458 /* Allow full data read from EC address space */ 483 /* Allow full data read from EC address space */
459 484
460 if ((obj_desc->field.region_obj->region.space_id == 485 if ((obj_desc->field.region_obj->region.space_id ==
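
Field preparation picks up the ACPI 5.0 GenericSerialBus metadata: the new access_length operand, plus the Connection() argument, which the parser delivers either as a namespace node (the Connection(\_SB.I2C0) name form, whose attached Buffer may still need its arguments evaluated) or as a raw resource buffer (the inline Connection(I2cSerialBus(...)) form). In outline:

    /* Sketch: the two sources of a field's connection resource */
    if (info->connection_node) {
        /* named Buffer object; evaluate deferred args, then point
           field.resource_buffer/resource_length at its contents */
    } else if (info->resource_buffer) {
        obj_desc->field.resource_buffer = info->resource_buffer;
        obj_desc->field.resource_length = info->resource_length;
    }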
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index f0d5e14f1f2..12d51df6d3b 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 55997e46948..fa50e77e64a 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index db502cd7d93..6e335dc3452 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index e3bb00ccdff..a67b1d925dd 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index c0c8842dd34..c6cf843cc4c 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index a979017d56b..b35bed52e06 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2011, Intel Corp. 10 * Copyright (C) 2000 - 2012, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc665cc554d..65a45d8335c 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index df66e7b686b..191a1294522 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 8ad93146dd3..eb6798ba8b5 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -435,4 +435,29 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
435 } 435 }
436} 436}
437 437
438/*******************************************************************************
439 *
440 * FUNCTION: acpi_is_valid_space_id
441 *
442 * PARAMETERS: space_id - ID to be validated
443 *
444 * RETURN: TRUE if valid/supported ID.
445 *
446 * DESCRIPTION: Validate an operation region space_id.
447 *
448 ******************************************************************************/
449
450u8 acpi_is_valid_space_id(u8 space_id)
451{
452
453 if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) &&
454 (space_id < ACPI_USER_REGION_BEGIN) &&
455 (space_id != ACPI_ADR_SPACE_DATA_TABLE) &&
456 (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
457 return (FALSE);
458 }
459
460 return (TRUE);
461}
462
438#endif 463#endif
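
acpi_is_valid_space_id() centralizes the check that excreate.c and exfldio.c now share: the predefined IDs, the OEM-defined range, and the two special fixed IDs all pass. A self-contained model with stand-in constants (the values are a reading of the ACPI 5.0 spec, not copied from this patch):

    #define NUM_PREDEFINED_REGIONS 0x0A  /* 0x00 SystemMemory .. 0x09 GenericSerialBus */
    #define USER_REGION_BEGIN      0x80  /* 0x80..0xFF reserved for OEMs */
    #define SPACE_DATA_TABLE       0x7E
    #define SPACE_FIXED_HARDWARE   0x7F

    static unsigned char is_valid_space_id(unsigned char id)
    {
        return (id < NUM_PREDEFINED_REGIONS) ||
               (id >= USER_REGION_BEGIN) ||
               (id == SPACE_DATA_TABLE) ||
               (id == SPACE_FIXED_HARDWARE);
    }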
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index fc380d3d45a..d21ec5f0b3a 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index f610d88a66b..1a6894afef7 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 050fd227951..1455ddcdc32 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index cc70f3fdcdd..4ea4eeb51bf 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
7 ******************************************************************************/ 7 ******************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2011, Intel Corp. 10 * Copyright (C) 2000 - 2012, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d52da307365..3c4a922a9fc 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 50d21c40b5c..d4973d9da9f 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 5f160587465..6e5c43a60bb 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
134 /* Supported widths are 8/16/32 */ 134 /* Supported widths are 8/16/32 */
135 135
136 if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) { 136 if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
137 ACPI_ERROR((AE_INFO,
138 "Bad BitWidth parameter: %8.8X", bit_width));
137 return AE_BAD_PARAMETER; 139 return AE_BAD_PARAMETER;
138 } 140 }
139 141
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index d707756228c..9d38eb6c0d0 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index d93172fd15a..61623f3f682 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1d0ef15d158..7c3d3ceb98b 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b683cc2ff9d..b7f2b3be79a 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 2ed294b7a4d..30ea5bc53a7 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index c1bd02b1a05..f375cb82e32 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index fd7c6380e29..9d84ec2f021 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 5f7dc691c18..5cbf15ffe7d 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d5fa520c3de..b20e7c8c3ff 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 3bb8bf105ea..dd77a3ce6e5 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index b3234fa795b..ec7ba2d3463 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index c845c8089f3..bbe46a447d3 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -620,6 +620,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
620 case ACPI_PTYPE2_FIXED: 620 case ACPI_PTYPE2_FIXED:
621 case ACPI_PTYPE2_MIN: 621 case ACPI_PTYPE2_MIN:
622 case ACPI_PTYPE2_COUNT: 622 case ACPI_PTYPE2_COUNT:
623 case ACPI_PTYPE2_FIX_VAR:
623 624
624 /* 625 /*
625 * These types all return a single Package that consists of a 626 * These types all return a single Package that consists of a
@@ -759,6 +760,34 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
759 } 760 }
760 break; 761 break;
761 762
763 case ACPI_PTYPE2_FIX_VAR:
764 /*
765 * Each subpackage has a fixed number of elements and an
766 * optional element
767 */
768 expected_count =
769 package->ret_info.count1 + package->ret_info.count2;
770 if (sub_package->package.count < expected_count) {
771 goto package_too_small;
772 }
773
774 status =
775 acpi_ns_check_package_elements(data, sub_elements,
776 package->ret_info.
777 object_type1,
778 package->ret_info.
779 count1,
780 package->ret_info.
781 object_type2,
782 sub_package->package.
783 count -
784 package->ret_info.
785 count1, 0);
786 if (ACPI_FAILURE(status)) {
787 return (status);
788 }
789 break;
790
762 case ACPI_PTYPE2_FIXED: 791 case ACPI_PTYPE2_FIXED:
763 792
764 /* Each sub-package has a fixed length */ 793 /* Each sub-package has a fixed length */
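
ACPI_PTYPE2_FIX_VAR is a new return-package shape: each subpackage opens with a fixed run of count1 elements of object_type1, followed by a variable tail of object_type2 elements, with count1 + count2 acting as the minimum subpackage length. The rule, condensed from the hunk above:

    /* Sketch: FIX_VAR check for one subpackage */
    expected_count = info->count1 + info->count2;
    if (sub_package->package.count < expected_count)
        goto package_too_small;
    /* then verify count1 elements of object_type1, followed by
       (package.count - count1) elements of object_type2 */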
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index ac7b854b0bd..9c35d20eb52 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -634,6 +634,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
634 case ACPI_PTYPE2_FIXED: 634 case ACPI_PTYPE2_FIXED:
635 case ACPI_PTYPE2_MIN: 635 case ACPI_PTYPE2_MIN:
636 case ACPI_PTYPE2_REV_FIXED: 636 case ACPI_PTYPE2_REV_FIXED:
637 case ACPI_PTYPE2_FIX_VAR:
637 break; 638 break;
638 639
639 default: 640 default:
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 024c4f263f8..726bc8e687f 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -467,11 +467,12 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
467 } 467 }
468 468
469 /* 469 /*
470 * Copy and uppercase the string. From the ACPI specification: 470 * Copy and uppercase the string. From the ACPI 5.0 specification:
471 * 471 *
472 * A valid PNP ID must be of the form "AAA####" where A is an uppercase 472 * A valid PNP ID must be of the form "AAA####" where A is an uppercase
473 * letter and # is a hex digit. A valid ACPI ID must be of the form 473 * letter and # is a hex digit. A valid ACPI ID must be of the form
474 * "ACPI####" where # is a hex digit. 474 * "NNNN####" where N is an uppercase letter or decimal digit, and
475 * # is a hex digit.
475 */ 476 */
476 for (dest = new_string->string.pointer; *source; dest++, source++) { 477 for (dest = new_string->string.pointer; *source; dest++, source++) {
477 *dest = (char)ACPI_TOUPPER(*source); 478 *dest = (char)ACPI_TOUPPER(*source);
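
The comment update tracks ACPI 5.0, which widened ACPI IDs from a literal "ACPI" prefix to any four uppercase alphanumerics. ACPICA itself only uppercases the string here; a hypothetical checker for the documented shape (not part of ACPICA) would look like:

    #include <ctype.h>

    /* Hypothetical helper: does s match the "NNNN####" form? */
    static int looks_like_acpi_id(const char *s)
    {
        int i;
        for (i = 0; i < 4; i++)   /* NNNN: uppercase letter or decimal digit */
            if (!isupper((unsigned char)s[i]) && !isdigit((unsigned char)s[i]))
                return 0;
        for (i = 4; i < 8; i++)   /* ####: hex digit */
            if (!isxdigit((unsigned char)s[i]))
                return 0;
        return s[8] == '\0';
    }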
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 28b0d7a62b9..507043d6611 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index cb1b104a69a..a535b7afda5 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 345f0c3c6ad..f69895a5489 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index e7f016d1b22..71d15f61807 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83bf9302430..af401c9c4df 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 57e6d825ed8..880a605cee2 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index e1fad0ee013..5ac36aba507 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -484,34 +484,54 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
484static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state 484static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
485 *parser_state) 485 *parser_state)
486{ 486{
487 u32 aml_offset = (u32) 487 u32 aml_offset;
488 ACPI_PTR_DIFF(parser_state->aml,
489 parser_state->aml_start);
490 union acpi_parse_object *field; 488 union acpi_parse_object *field;
489 union acpi_parse_object *arg = NULL;
491 u16 opcode; 490 u16 opcode;
492 u32 name; 491 u32 name;
492 u8 access_type;
493 u8 access_attribute;
494 u8 access_length;
495 u32 pkg_length;
496 u8 *pkg_end;
497 u32 buffer_length;
493 498
494 ACPI_FUNCTION_TRACE(ps_get_next_field); 499 ACPI_FUNCTION_TRACE(ps_get_next_field);
495 500
501 aml_offset =
502 (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
503
496 /* Determine field type */ 504 /* Determine field type */
497 505
498 switch (ACPI_GET8(parser_state->aml)) { 506 switch (ACPI_GET8(parser_state->aml)) {
499 default: 507 case AML_FIELD_OFFSET_OP:
500 508
501 opcode = AML_INT_NAMEDFIELD_OP; 509 opcode = AML_INT_RESERVEDFIELD_OP;
510 parser_state->aml++;
502 break; 511 break;
503 512
504 case 0x00: 513 case AML_FIELD_ACCESS_OP:
505 514
506 opcode = AML_INT_RESERVEDFIELD_OP; 515 opcode = AML_INT_ACCESSFIELD_OP;
507 parser_state->aml++; 516 parser_state->aml++;
508 break; 517 break;
509 518
510 case 0x01: 519 case AML_FIELD_CONNECTION_OP:
511 520
512 opcode = AML_INT_ACCESSFIELD_OP; 521 opcode = AML_INT_CONNECTION_OP;
522 parser_state->aml++;
523 break;
524
525 case AML_FIELD_EXT_ACCESS_OP:
526
527 opcode = AML_INT_EXTACCESSFIELD_OP;
513 parser_state->aml++; 528 parser_state->aml++;
514 break; 529 break;
530
531 default:
532
533 opcode = AML_INT_NAMEDFIELD_OP;
534 break;
515 } 535 }
516 536
517 /* Allocate a new field op */ 537 /* Allocate a new field op */
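
The field-element dispatch drops the raw 0x00/0x01 byte values in favor of the named prefix opcodes and adds the two ACPI 5.0 element types. Per the ACPI 5.0 FieldList grammar (the actual byte values are not shown in this hunk, so treat them as annotation):

    /* FieldElement prefix byte -> internal parser opcode */
    /* 0x00 AML_FIELD_OFFSET_OP     -> AML_INT_RESERVEDFIELD_OP           */
    /* 0x01 AML_FIELD_ACCESS_OP     -> AML_INT_ACCESSFIELD_OP             */
    /* 0x02 AML_FIELD_CONNECTION_OP -> AML_INT_CONNECTION_OP     (new)    */
    /* 0x03 AML_FIELD_EXT_ACCESS_OP -> AML_INT_EXTACCESSFIELD_OP (new)    */
    /* else: a NameSeg              -> AML_INT_NAMEDFIELD_OP              */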
@@ -549,16 +569,111 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
549 break; 569 break;
550 570
551 case AML_INT_ACCESSFIELD_OP: 571 case AML_INT_ACCESSFIELD_OP:
572 case AML_INT_EXTACCESSFIELD_OP:
552 573
553 /* 574 /*
554 * Get access_type and access_attrib and merge into the field Op 575 * Get access_type and access_attrib and merge into the field Op
555 * access_type is first operand, access_attribute is second 576 * access_type is first operand, access_attribute is second. stuff
577 * these bytes into the node integer value for convenience.
556 */ 578 */
557 field->common.value.integer = 579
558 (((u32) ACPI_GET8(parser_state->aml) << 8)); 580 /* Get the two bytes (Type/Attribute) */
581
582 access_type = ACPI_GET8(parser_state->aml);
559 parser_state->aml++; 583 parser_state->aml++;
560 field->common.value.integer |= ACPI_GET8(parser_state->aml); 584 access_attribute = ACPI_GET8(parser_state->aml);
561 parser_state->aml++; 585 parser_state->aml++;
586
587 field->common.value.integer = (u8)access_type;
588 field->common.value.integer |= (u16)(access_attribute << 8);
589
590 /* This opcode has a third byte, access_length */
591
592 if (opcode == AML_INT_EXTACCESSFIELD_OP) {
593 access_length = ACPI_GET8(parser_state->aml);
594 parser_state->aml++;
595
596 field->common.value.integer |=
597 (u32)(access_length << 16);
598 }
599 break;
600
601 case AML_INT_CONNECTION_OP:
602
603 /*
604 * Argument for Connection operator can be either a Buffer
605 * (resource descriptor), or a name_string.
606 */
607 if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
608 parser_state->aml++;
609
610 pkg_end = parser_state->aml;
611 pkg_length =
612 acpi_ps_get_next_package_length(parser_state);
613 pkg_end += pkg_length;
614
615 if (parser_state->aml < pkg_end) {
616
617 /* Non-empty list */
618
619 arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
620 if (!arg) {
621 return_PTR(NULL);
622 }
623
624 /* Get the actual buffer length argument */
625
626 opcode = ACPI_GET8(parser_state->aml);
627 parser_state->aml++;
628
629 switch (opcode) {
630 case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
631 buffer_length =
632 ACPI_GET8(parser_state->aml);
633 parser_state->aml += 1;
634 break;
635
636 case AML_WORD_OP: /* AML_WORDDATA_ARG */
637 buffer_length =
638 ACPI_GET16(parser_state->aml);
639 parser_state->aml += 2;
640 break;
641
642 case AML_DWORD_OP: /* AML_DWORDATA_ARG */
643 buffer_length =
644 ACPI_GET32(parser_state->aml);
645 parser_state->aml += 4;
646 break;
647
648 default:
649 buffer_length = 0;
650 break;
651 }
652
653 /* Fill in bytelist data */
654
655 arg->named.value.size = buffer_length;
656 arg->named.data = parser_state->aml;
657 }
658
659 /* Skip to End of byte data */
660
661 parser_state->aml = pkg_end;
662 } else {
663 arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
664 if (!arg) {
665 return_PTR(NULL);
666 }
667
668 /* Get the Namestring argument */
669
670 arg->common.value.name =
671 acpi_ps_get_next_namestring(parser_state);
672 }
673
674 /* Link the buffer/namestring to parent (CONNECTION_OP) */
675
676 acpi_ps_append_arg(field, arg);
562 break; 677 break;
563 678
564 default: 679 default:
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 01dd70d1de5..9547ad8a620 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index bed08de7528..a0226fdcf75 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -638,7 +638,16 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
638 638
639/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, 639/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
640 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, 640 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
641 AML_FLAGS_EXEC_0A_0T_1R) 641 AML_FLAGS_EXEC_0A_0T_1R),
642
643/* ACPI 5.0 opcodes */
644
645/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP,
646 ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
647 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
648/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
649 ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
650 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
642 651
643/*! [End] no source code translation !*/ 652/*! [End] no source code translation !*/
644}; 653};
@@ -657,7 +666,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
657/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 666/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
658/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX, 667/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
659/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D, 668/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
660/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 669/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
661/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, 670/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
662/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, 671/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
663/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, 672/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
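
The psopcode.c hunks above register the two ACPI 5.0 internal opcodes and route raw AML bytes 0x38 and 0x39 through the one-byte dispatch table to op-info entries 0x7F and 0x80, where they previously fell through as _UNK. A toy model of the two-level lookup; the table contents are trimmed stand-ins, and index 0 plays the role of _UNK here:

#include <stdio.h>
#include <stdint.h>

struct opcode_info {
	const char *name;
};

/* Trimmed stand-ins for the two tables in the hunks above. */
static const struct opcode_info op_info[0x81] = {
	[0x7F] = { "-ConnectField-"   },	/* ACPI 5.0 */
	[0x80] = { "-ExtAccessField-" },	/* ACPI 5.0 */
};

static const uint8_t short_op_index[256] = {
	[0x38] = 0x7F,	/* raw AML byte -> op_info index */
	[0x39] = 0x80,
};

int main(void)
{
	uint8_t aml_byte = 0x38;			/* first opcode byte */
	uint8_t index = short_op_index[aml_byte];	/* level 1 */

	if (index)					/* level 2 */
		printf("opcode 0x%02X -> %s\n", aml_byte,
		       op_info[index].name);
	return 0;
}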
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 9bb0cbd37b5..2ff9c35a196 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a5faa1323a0..c872aa4b926 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index f1464c03aa4..2b03cdbbe1c 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,12 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
74 74
75 ACPI_FUNCTION_ENTRY(); 75 ACPI_FUNCTION_ENTRY();
76 76
77/*
78 if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP)
79 {
80 return (Op->Common.Value.Arg);
81 }
82*/
77 /* Get the info structure for this opcode */ 83 /* Get the info structure for this opcode */
78 84
79 op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); 85 op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 7eda7850342..13bb131ae12 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 3312d6368bf..ab96cf47896 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 8086805d449..9d98c5ff66a 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 9e66f907842..a0305652394 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 3a8a89ec2ca..3c6df4b7eb2 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -313,6 +313,38 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
313 resource_source)); 313 resource_source));
314 break; 314 break;
315 315
316 case ACPI_RESOURCE_TYPE_GPIO:
317
318 total_size =
319 (acpi_rs_length) (total_size +
320 (resource->data.gpio.
321 pin_table_length * 2) +
322 resource->data.gpio.
323 resource_source.string_length +
324 resource->data.gpio.
325 vendor_length);
326
327 break;
328
329 case ACPI_RESOURCE_TYPE_SERIAL_BUS:
330
331 total_size =
332 acpi_gbl_aml_resource_serial_bus_sizes[resource->
333 data.
334 common_serial_bus.
335 type];
336
337 total_size = (acpi_rs_length) (total_size +
338 resource->data.
339 i2c_serial_bus.
340 resource_source.
341 string_length +
342 resource->data.
343 i2c_serial_bus.
344 vendor_length);
345
346 break;
347
316 default: 348 default:
317 break; 349 break;
318 } 350 }
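
In the rscalc.c hunk above, sizing the AML image of a GPIO descriptor means adding three variable tails to the fixed part: two bytes per pin, the ResourceSource string, and any vendor data. For example, 4 pins, a 7-byte source string, and 2 vendor bytes contribute 8 + 7 + 2 = 17 extra bytes. The same arithmetic as a sketch, with a pared-down stand-in for acpi_resource_gpio:

#include <stdint.h>

struct gpio_resource {			/* pared-down acpi_resource_gpio  */
	uint16_t pin_table_length;	/* number of pins                 */
	uint16_t string_length;		/* ResourceSource string bytes    */
	uint16_t vendor_length;		/* optional vendor data, may be 0 */
};

/* Mirror of the total_size computation in the hunk above. */
static uint32_t gpio_aml_size(uint32_t fixed_size,
			      const struct gpio_resource *r)
{
	return fixed_size
	     + (uint32_t)r->pin_table_length * 2	/* one u16 per pin */
	     + r->string_length
	     + r->vendor_length;
}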
@@ -362,10 +394,11 @@ acpi_rs_get_list_length(u8 * aml_buffer,
362 u32 extra_struct_bytes; 394 u32 extra_struct_bytes;
363 u8 resource_index; 395 u8 resource_index;
364 u8 minimum_aml_resource_length; 396 u8 minimum_aml_resource_length;
397 union aml_resource *aml_resource;
365 398
366 ACPI_FUNCTION_TRACE(rs_get_list_length); 399 ACPI_FUNCTION_TRACE(rs_get_list_length);
367 400
368 *size_needed = 0; 401 *size_needed = ACPI_RS_SIZE_MIN; /* Minimum size is one end_tag */
369 end_aml = aml_buffer + aml_buffer_length; 402 end_aml = aml_buffer + aml_buffer_length;
370 403
371 /* Walk the list of AML resource descriptors */ 404 /* Walk the list of AML resource descriptors */
@@ -376,9 +409,15 @@ acpi_rs_get_list_length(u8 * aml_buffer,
376 409
377 status = acpi_ut_validate_resource(aml_buffer, &resource_index); 410 status = acpi_ut_validate_resource(aml_buffer, &resource_index);
378 if (ACPI_FAILURE(status)) { 411 if (ACPI_FAILURE(status)) {
412 /*
413 * Exit on failure. Cannot continue because the descriptor length
414 * may be bogus also.
415 */
379 return_ACPI_STATUS(status); 416 return_ACPI_STATUS(status);
380 } 417 }
381 418
419 aml_resource = (void *)aml_buffer;
420
382 /* Get the resource length and base (minimum) AML size */ 421 /* Get the resource length and base (minimum) AML size */
383 422
384 resource_length = acpi_ut_get_resource_length(aml_buffer); 423 resource_length = acpi_ut_get_resource_length(aml_buffer);
@@ -422,10 +461,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
422 461
423 case ACPI_RESOURCE_NAME_END_TAG: 462 case ACPI_RESOURCE_NAME_END_TAG:
424 /* 463 /*
425 * End Tag: 464 * End Tag: This is the normal exit
426 * This is the normal exit, add size of end_tag
427 */ 465 */
428 *size_needed += ACPI_RS_SIZE_MIN;
429 return_ACPI_STATUS(AE_OK); 466 return_ACPI_STATUS(AE_OK);
430 467
431 case ACPI_RESOURCE_NAME_ADDRESS32: 468 case ACPI_RESOURCE_NAME_ADDRESS32:
@@ -457,6 +494,33 @@ acpi_rs_get_list_length(u8 * aml_buffer,
457 minimum_aml_resource_length); 494 minimum_aml_resource_length);
458 break; 495 break;
459 496
497 case ACPI_RESOURCE_NAME_GPIO:
498
499 /* Vendor data is optional */
500
501 if (aml_resource->gpio.vendor_length) {
502 extra_struct_bytes +=
503 aml_resource->gpio.vendor_offset -
504 aml_resource->gpio.pin_table_offset +
505 aml_resource->gpio.vendor_length;
506 } else {
507 extra_struct_bytes +=
508 aml_resource->large_header.resource_length +
509 sizeof(struct aml_resource_large_header) -
510 aml_resource->gpio.pin_table_offset;
511 }
512 break;
513
514 case ACPI_RESOURCE_NAME_SERIAL_BUS:
515
516 minimum_aml_resource_length =
517 acpi_gbl_resource_aml_serial_bus_sizes
518 [aml_resource->common_serial_bus.type];
519 extra_struct_bytes +=
520 aml_resource->common_serial_bus.resource_length -
521 minimum_aml_resource_length;
522 break;
523
460 default: 524 default:
461 break; 525 break;
462 } 526 }
@@ -467,9 +531,18 @@ acpi_rs_get_list_length(u8 * aml_buffer,
467 * Important: Round the size up for the appropriate alignment. This 531 * Important: Round the size up for the appropriate alignment. This
468 * is a requirement on IA64. 532 * is a requirement on IA64.
469 */ 533 */
470 buffer_size = acpi_gbl_resource_struct_sizes[resource_index] + 534 if (acpi_ut_get_resource_type(aml_buffer) ==
471 extra_struct_bytes; 535 ACPI_RESOURCE_NAME_SERIAL_BUS) {
472 buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size); 536 buffer_size =
537 acpi_gbl_resource_struct_serial_bus_sizes
538 [aml_resource->common_serial_bus.type] +
539 extra_struct_bytes;
540 } else {
541 buffer_size =
542 acpi_gbl_resource_struct_sizes[resource_index] +
543 extra_struct_bytes;
544 }
545 buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
473 546
474 *size_needed += buffer_size; 547 *size_needed += buffer_size;
475 548
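
Two sizing changes run through acpi_rs_get_list_length() in the hunks above: the end-tag structure is now counted once up front (*size_needed starts at ACPI_RS_SIZE_MIN instead of being added at the END_TAG exit), and SerialBus descriptors take their base struct size from a per-subtype table, since the I2C, SPI, and UART structs differ. A sketch of the size selection; the subtype sizes are invented stand-ins for the real ACPI_RS_SIZE() values:

#include <stdint.h>
#include <stddef.h>

enum { BUS_I2C = 1, BUS_SPI = 2, BUS_UART = 3 };

/* Stand-in per-subtype sizes; index 0 is unused. */
static const uint8_t serial_bus_struct_sizes[] = { 0, 48, 56, 64 };

static uint32_t round_up_native_word(uint32_t n)
{
	return (n + sizeof(void *) - 1) & ~(uint32_t)(sizeof(void *) - 1);
}

/* Pick the internal struct size for one descriptor, as the hunk does. */
static uint32_t struct_size(int is_serial_bus, uint8_t subtype,
			    uint32_t generic_size, uint32_t extra_bytes)
{
	uint32_t size = is_serial_bus
	    ? serial_bus_struct_sizes[subtype]	/* I2C/SPI/UART differ */
	    : generic_size;			/* one size per type   */

	return round_up_native_word(size + extra_bytes); /* IA64 alignment */
}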
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 4ce6e1147e8..46d6eb38ae6 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,70 @@ ACPI_MODULE_NAME("rscreate")
51 51
52/******************************************************************************* 52/*******************************************************************************
53 * 53 *
54 * FUNCTION: acpi_buffer_to_resource
55 *
56 * PARAMETERS: aml_buffer - Pointer to the resource byte stream
57 * aml_buffer_length - Length of the aml_buffer
58 * resource_ptr - Where the converted resource is returned
59 *
60 * RETURN: Status
61 *
62 * DESCRIPTION: Convert a raw AML buffer to a resource list
63 *
64 ******************************************************************************/
65acpi_status
66acpi_buffer_to_resource(u8 *aml_buffer,
67 u16 aml_buffer_length,
68 struct acpi_resource **resource_ptr)
69{
70 acpi_status status;
71 acpi_size list_size_needed;
72 void *resource;
73 void *current_resource_ptr;
74
75 /*
76 * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag
77 * is not required here.
78 */
79
80 /* Get the required length for the converted resource */
81
82 status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
83 &list_size_needed);
84 if (status == AE_AML_NO_RESOURCE_END_TAG) {
85 status = AE_OK;
86 }
87 if (ACPI_FAILURE(status)) {
88 return (status);
89 }
90
91 /* Allocate a buffer for the converted resource */
92
93 resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
94 current_resource_ptr = resource;
95 if (!resource) {
96 return (AE_NO_MEMORY);
97 }
98
99 /* Perform the AML-to-Resource conversion */
100
101 status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
102 acpi_rs_convert_aml_to_resources,
103 &current_resource_ptr);
104 if (status == AE_AML_NO_RESOURCE_END_TAG) {
105 status = AE_OK;
106 }
107 if (ACPI_FAILURE(status)) {
108 ACPI_FREE(resource);
109 } else {
110 *resource_ptr = resource;
111 }
112
113 return (status);
114}
115
116/*******************************************************************************
117 *
54 * FUNCTION: acpi_rs_create_resource_list 118 * FUNCTION: acpi_rs_create_resource_list
55 * 119 *
56 * PARAMETERS: aml_buffer - Pointer to the resource byte stream 120 * PARAMETERS: aml_buffer - Pointer to the resource byte stream
@@ -66,9 +130,10 @@ ACPI_MODULE_NAME("rscreate")
66 * of device resources. 130 * of device resources.
67 * 131 *
68 ******************************************************************************/ 132 ******************************************************************************/
133
69acpi_status 134acpi_status
70acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer, 135acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
71 struct acpi_buffer *output_buffer) 136 struct acpi_buffer * output_buffer)
72{ 137{
73 138
74 acpi_status status; 139 acpi_status status;
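
acpi_buffer_to_resource() above is a new public entry point: it converts a raw AML resource template, tolerating a missing end tag, and hands back an allocated acpi_resource list. Judging from the ACPI_FREE() in its error path, the caller releases the converted list the same way. A hedged in-tree usage sketch; the caller function is hypothetical:

#include <acpi/acpi.h>

/* Hypothetical caller: convert a raw AML resource template (no end
 * tag required, which the new interface explicitly tolerates). */
static acpi_status demo_buffer_to_resource(u8 *aml, u16 aml_len)
{
	struct acpi_resource *res;
	acpi_status status;

	status = acpi_buffer_to_resource(aml, aml_len, &res);
	if (ACPI_FAILURE(status))
		return status;

	/* ... walk the list with ACPI_NEXT_RESOURCE() ... */

	ACPI_FREE(res);		/* caller owns the converted list */
	return AE_OK;
}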
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 33db7520c74..b4c58113239 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,11 +61,13 @@ static void acpi_rs_out_integer64(char *title, u64 value);
61 61
62static void acpi_rs_out_title(char *title); 62static void acpi_rs_out_title(char *title);
63 63
64static void acpi_rs_dump_byte_list(u16 length, u8 * data); 64static void acpi_rs_dump_byte_list(u16 length, u8 *data);
65 65
66static void acpi_rs_dump_dword_list(u8 length, u32 * data); 66static void acpi_rs_dump_word_list(u16 length, u16 *data);
67 67
68static void acpi_rs_dump_short_byte_list(u8 length, u8 * data); 68static void acpi_rs_dump_dword_list(u8 length, u32 *data);
69
70static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
69 71
70static void 72static void
71acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source); 73acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
@@ -309,6 +311,125 @@ struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
309 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL} 311 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
310}; 312};
311 313
314struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
315 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
316 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
317 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
318 "ConnectionType", acpi_gbl_ct_decode},
319 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
320 "ProducerConsumer", acpi_gbl_consume_decode},
321 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
322 acpi_gbl_ppc_decode},
323 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
324 acpi_gbl_shr_decode},
325 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
326 "IoRestriction", acpi_gbl_ior_decode},
327 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
328 acpi_gbl_he_decode},
329 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
330 acpi_gbl_ll_decode},
331 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
332 NULL},
333 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
334 "DebounceTimeout", NULL},
335 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
336 "ResourceSource", NULL},
337 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
338 "PinTableLength", NULL},
339 {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
340 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
341 NULL},
342 {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
343 NULL},
344};
345
346struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
347 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
348 "FixedDma", NULL},
349 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
350 "RequestLines", NULL},
351 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
352 NULL},
353 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
354 acpi_gbl_dts_decode},
355};
356
357#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
358 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
359 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
360 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
361 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
362 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
363 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
364 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
365 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
366 {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
367
368struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
369 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
370 "Common Serial Bus", NULL},
371 ACPI_RS_DUMP_COMMON_SERIAL_BUS
372};
373
374struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
375 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
376 "I2C Serial Bus", NULL},
377 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
378 ACPI_RSD_OFFSET(i2c_serial_bus.
379 access_mode),
380 "AccessMode", acpi_gbl_am_decode},
381 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
382 "ConnectionSpeed", NULL},
383 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
384 "SlaveAddress", NULL},
385};
386
387struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
388 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
389 "Spi Serial Bus", NULL},
390 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
391 ACPI_RSD_OFFSET(spi_serial_bus.
392 wire_mode), "WireMode",
393 acpi_gbl_wm_decode},
394 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
395 "DevicePolarity", acpi_gbl_dp_decode},
396 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
397 "DataBitLength", NULL},
398 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
399 "ClockPhase", acpi_gbl_cph_decode},
400 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
401 "ClockPolarity", acpi_gbl_cpo_decode},
402 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
403 "DeviceSelection", NULL},
404 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
405 "ConnectionSpeed", NULL},
406};
407
408struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
409 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
410 "Uart Serial Bus", NULL},
411 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
412 ACPI_RSD_OFFSET(uart_serial_bus.
413 flow_control),
414 "FlowControl", acpi_gbl_fc_decode},
415 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
416 "StopBits", acpi_gbl_sb_decode},
417 {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
418 "DataBits", acpi_gbl_bpb_decode},
419 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
420 acpi_gbl_ed_decode},
421 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
422 acpi_gbl_pt_decode},
423 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
424 "LinesEnabled", NULL},
425 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
426 "RxFifoSize", NULL},
427 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
428 "TxFifoSize", NULL},
429 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
430 "ConnectionSpeed", NULL},
431};
432
312/* 433/*
313 * Tables used for common address descriptor flag fields 434 * Tables used for common address descriptor flag fields
314 */ 435 */
@@ -413,7 +534,14 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
413 /* Data items, 8/16/32/64 bit */ 534 /* Data items, 8/16/32/64 bit */
414 535
415 case ACPI_RSD_UINT8: 536 case ACPI_RSD_UINT8:
416 acpi_rs_out_integer8(name, ACPI_GET8(target)); 537 if (table->pointer) {
538 acpi_rs_out_string(name, ACPI_CAST_PTR(char,
539 table->
540 pointer
541 [*target]));
542 } else {
543 acpi_rs_out_integer8(name, ACPI_GET8(target));
544 }
417 break; 545 break;
418 546
419 case ACPI_RSD_UINT16: 547 case ACPI_RSD_UINT16:
@@ -444,6 +572,13 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
444 0x03])); 572 0x03]));
445 break; 573 break;
446 574
575 case ACPI_RSD_3BITFLAG:
576 acpi_rs_out_string(name, ACPI_CAST_PTR(char,
577 table->
578 pointer[*target &
579 0x07]));
580 break;
581
447 case ACPI_RSD_SHORTLIST: 582 case ACPI_RSD_SHORTLIST:
448 /* 583 /*
449 * Short byte list (single line output) for DMA and IRQ resources 584 * Short byte list (single line output) for DMA and IRQ resources
@@ -456,6 +591,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
456 } 591 }
457 break; 592 break;
458 593
594 case ACPI_RSD_SHORTLISTX:
595 /*
596 * Short byte list (single line output) for GPIO vendor data
597 * Note: The list length is obtained from the previous table entry
598 */
599 if (previous_target) {
600 acpi_rs_out_title(name);
601 acpi_rs_dump_short_byte_list(*previous_target,
602 *
603 (ACPI_CAST_INDIRECT_PTR
604 (u8, target)));
605 }
606 break;
607
459 case ACPI_RSD_LONGLIST: 608 case ACPI_RSD_LONGLIST:
460 /* 609 /*
461 * Long byte list for Vendor resource data 610 * Long byte list for Vendor resource data
@@ -480,6 +629,18 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
480 } 629 }
481 break; 630 break;
482 631
632 case ACPI_RSD_WORDLIST:
633 /*
634 * Word list for GPIO Pin Table
635 * Note: The list length is obtained from the previous table entry
636 */
637 if (previous_target) {
638 acpi_rs_dump_word_list(*previous_target,
639 *(ACPI_CAST_INDIRECT_PTR
640 (u16, target)));
641 }
642 break;
643
483 case ACPI_RSD_ADDRESS: 644 case ACPI_RSD_ADDRESS:
484 /* 645 /*
485 * Common flags for all Address resources 646 * Common flags for all Address resources
@@ -627,14 +788,20 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
627 788
628 /* Dump the resource descriptor */ 789 /* Dump the resource descriptor */
629 790
630 acpi_rs_dump_descriptor(&resource_list->data, 791 if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
631 acpi_gbl_dump_resource_dispatch[type]); 792 acpi_rs_dump_descriptor(&resource_list->data,
793 acpi_gbl_dump_serial_bus_dispatch
794 [resource_list->data.
795 common_serial_bus.type]);
796 } else {
797 acpi_rs_dump_descriptor(&resource_list->data,
798 acpi_gbl_dump_resource_dispatch
799 [type]);
800 }
632 801
633 /* Point to the next resource structure */ 802 /* Point to the next resource structure */
634 803
635 resource_list = 804 resource_list = ACPI_NEXT_RESOURCE(resource_list);
636 ACPI_ADD_PTR(struct acpi_resource, resource_list,
637 resource_list->length);
638 805
639 /* Exit when END_TAG descriptor is reached */ 806 /* Exit when END_TAG descriptor is reached */
640 807
@@ -768,4 +935,13 @@ static void acpi_rs_dump_dword_list(u8 length, u32 * data)
768 } 935 }
769} 936}
770 937
938static void acpi_rs_dump_word_list(u16 length, u16 *data)
939{
940 u16 i;
941
942 for (i = 0; i < length; i++) {
943 acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
944 }
945}
946
771#endif 947#endif
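
All of the dump tables added above share one shape: entry 0 is an ACPI_RSD_TITLE record whose offset slot carries the table's entry count (ACPI_RSD_TABLE_SIZE), and each later record pairs a field-width opcode, a byte offset into the resource data, a display name, and an optional decode-string table. The new ACPI_RSD_3BITFLAG case extends that scheme to three-bit fields: mask with 0x07, then index an eight-entry string table. A stand-alone sketch of that decode; the DataBits strings assume the ACPI 5.0 UART encoding (0 = 5 bits through 4 = 9 bits):

#include <stdio.h>
#include <stdint.h>

/* Assumed UART DataBits decode table: only values 0-4 are meaningful. */
static const char *const bpb_decode[8] = {
	"5 data bits", "6 data bits", "7 data bits",
	"8 data bits", "9 data bits",
	"Reserved", "Reserved", "Reserved",
};

/* Mirror of the new ACPI_RSD_3BITFLAG case: mask three bits, look up. */
static void dump_3bitflag(const char *name, uint8_t field)
{
	printf("%s : %s\n", name, bpb_decode[field & 0x07]);
}

int main(void)
{
	dump_3bitflag("DataBits", 3);	/* prints "DataBits : 8 data bits" */
	return 0;
}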
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index f9ea60872aa..a9fa5158200 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -76,7 +76,10 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
76 acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */ 76 acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
77 acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ 77 acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
78 acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ 78 acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
79 acpi_rs_convert_generic_reg /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ 79 acpi_rs_convert_generic_reg, /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
80 acpi_rs_convert_gpio, /* 0x11, ACPI_RESOURCE_TYPE_GPIO */
81 acpi_rs_convert_fixed_dma, /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
82 NULL, /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
80}; 83};
81 84
82/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */ 85/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -94,7 +97,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
94 acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */ 97 acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
95 acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */ 98 acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */
96 acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */ 99 acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
97 NULL, /* 0x0A, Reserved */ 100 acpi_rs_convert_fixed_dma, /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */
98 NULL, /* 0x0B, Reserved */ 101 NULL, /* 0x0B, Reserved */
99 NULL, /* 0x0C, Reserved */ 102 NULL, /* 0x0C, Reserved */
100 NULL, /* 0x0D, Reserved */ 103 NULL, /* 0x0D, Reserved */
@@ -114,7 +117,19 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
114 acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */ 117 acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
115 acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */ 118 acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
116 acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */ 119 acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
117 acpi_rs_convert_ext_address64 /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */ 120 acpi_rs_convert_ext_address64, /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
121 acpi_rs_convert_gpio, /* 0x0C, ACPI_RESOURCE_NAME_GPIO */
122 NULL, /* 0x0D, Reserved */
123 NULL, /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
124};
125
126/* Subtype table for serial_bus -- I2C, SPI, and UART */
127
128struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
129 NULL,
130 acpi_rs_convert_i2c_serial_bus,
131 acpi_rs_convert_spi_serial_bus,
132 acpi_rs_convert_uart_serial_bus,
118}; 133};
119 134
120#ifdef ACPI_FUTURE_USAGE 135#ifdef ACPI_FUTURE_USAGE
@@ -140,6 +155,16 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
140 acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ 155 acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
141 acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ 156 acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
142 acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ 157 acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
158 acpi_rs_dump_gpio, /* ACPI_RESOURCE_TYPE_GPIO */
159 acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */
160 NULL, /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
161};
162
163struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
164 NULL,
165 acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */
166 acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */
167 acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */
143}; 168};
144#endif 169#endif
145 170
@@ -166,7 +191,10 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
166 sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */ 191 sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */
167 sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ 192 sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
168 sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ 193 sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
169 sizeof(struct aml_resource_generic_register) /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ 194 sizeof(struct aml_resource_generic_register), /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
195 sizeof(struct aml_resource_gpio), /* ACPI_RESOURCE_TYPE_GPIO */
196 sizeof(struct aml_resource_fixed_dma), /* ACPI_RESOURCE_TYPE_FIXED_DMA */
197 sizeof(struct aml_resource_common_serialbus), /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
170}; 198};
171 199
172const u8 acpi_gbl_resource_struct_sizes[] = { 200const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -182,7 +210,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
182 ACPI_RS_SIZE_MIN, 210 ACPI_RS_SIZE_MIN,
183 ACPI_RS_SIZE(struct acpi_resource_io), 211 ACPI_RS_SIZE(struct acpi_resource_io),
184 ACPI_RS_SIZE(struct acpi_resource_fixed_io), 212 ACPI_RS_SIZE(struct acpi_resource_fixed_io),
185 0, 213 ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
186 0, 214 0,
187 0, 215 0,
188 0, 216 0,
@@ -202,5 +230,21 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
202 ACPI_RS_SIZE(struct acpi_resource_address16), 230 ACPI_RS_SIZE(struct acpi_resource_address16),
203 ACPI_RS_SIZE(struct acpi_resource_extended_irq), 231 ACPI_RS_SIZE(struct acpi_resource_extended_irq),
204 ACPI_RS_SIZE(struct acpi_resource_address64), 232 ACPI_RS_SIZE(struct acpi_resource_address64),
205 ACPI_RS_SIZE(struct acpi_resource_extended_address64) 233 ACPI_RS_SIZE(struct acpi_resource_extended_address64),
234 ACPI_RS_SIZE(struct acpi_resource_gpio),
235 ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
236};
237
238const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
239 0,
240 sizeof(struct aml_resource_i2c_serialbus),
241 sizeof(struct aml_resource_spi_serialbus),
242 sizeof(struct aml_resource_uart_serialbus),
243};
244
245const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
246 0,
247 ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
248 ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
249 ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
206}; 250};
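
The NULL left in the SERIAL_BUS slots above is deliberate: the top-level tables are indexed by resource type, and serial-bus descriptors resolve one level deeper through the subtype table (1 = I2C, 2 = SPI, 3 = UART; 0 is unused). A sketch of the selection, mirroring the guarded lookup in the rslist.c hunks below:

#include <stddef.h>
#include <stdint.h>

typedef int (*convert_fn)(void);	/* stand-in for the info tables */

#define TYPE_SERIAL_BUS		0x13	/* from the dispatch comments   */
#define MAX_SERIAL_SUBTYPE	3	/* I2C=1, SPI=2, UART=3         */

static convert_fn main_dispatch[0x14];	/* [TYPE_SERIAL_BUS] stays NULL */
static convert_fn serial_bus_dispatch[4];

static convert_fn pick_converter(uint8_t type, uint8_t subtype)
{
	if (type == TYPE_SERIAL_BUS) {
		if (subtype == 0 || subtype > MAX_SERIAL_SUBTYPE)
			return NULL;	/* invalid/unsupported descriptor */
		return serial_bus_dispatch[subtype];
	}
	return main_dispatch[type];
}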
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 0c7efef008b..f6a081057a2 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 50b8ad21116..e23a9ec248c 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -264,3 +264,34 @@ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
264 AML_OFFSET(dma.dma_channel_mask), 264 AML_OFFSET(dma.dma_channel_mask),
265 ACPI_RS_OFFSET(data.dma.channel_count)} 265 ACPI_RS_OFFSET(data.dma.channel_count)}
266}; 266};
267
268/*******************************************************************************
269 *
270 * acpi_rs_convert_fixed_dma
271 *
272 ******************************************************************************/
273
274struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
275 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA,
276 ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
277 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)},
278
279 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA,
280 sizeof(struct aml_resource_fixed_dma),
281 0},
282
283 /*
284 * These fields are contiguous in both the source and destination:
285 * request_lines
286 * channels
287 */
288
289 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
290 AML_OFFSET(fixed_dma.request_lines),
291 2},
292
293 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
294 AML_OFFSET(fixed_dma.width),
295 1},
296
297};
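
The FixedDMA table above leans on the layout noted in its comment: request_lines and channels are adjacent u16 fields on both sides, so a single ACPI_RSC_MOVE16 entry with a count of 2 moves both at once. A sketch of that primitive, assuming a little-endian host (ACPICA's move macros additionally handle byte order and alignment):

#include <stdint.h>
#include <string.h>

/* Move 'count' contiguous 16-bit items between struct and AML images. */
static void move16(void *dst, const void *src, uint16_t count)
{
	memcpy(dst, src, (size_t)count * sizeof(uint16_t));
}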
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 1bfcef736c5..9be129f5d6f 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
70 struct acpi_resource **resource_ptr = 70 struct acpi_resource **resource_ptr =
71 ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context); 71 ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
72 struct acpi_resource *resource; 72 struct acpi_resource *resource;
73 union aml_resource *aml_resource;
74 struct acpi_rsconvert_info *conversion_table;
73 acpi_status status; 75 acpi_status status;
74 76
75 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources); 77 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
@@ -84,14 +86,37 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
84 "Misaligned resource pointer %p", resource)); 86 "Misaligned resource pointer %p", resource));
85 } 87 }
86 88
89 /* Get the appropriate conversion info table */
90
91 aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
92 if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
93 if (aml_resource->common_serial_bus.type >
94 AML_RESOURCE_MAX_SERIALBUSTYPE) {
95 conversion_table = NULL;
96 } else {
97 /* This is an I2C, SPI, or UART serial_bus descriptor */
98
99 conversion_table =
100 acpi_gbl_convert_resource_serial_bus_dispatch
101 [aml_resource->common_serial_bus.type];
102 }
103 } else {
104 conversion_table =
105 acpi_gbl_get_resource_dispatch[resource_index];
106 }
107
108 if (!conversion_table) {
109 ACPI_ERROR((AE_INFO,
110 "Invalid/unsupported resource descriptor: Type 0x%2.2X",
111 resource_index));
112 return (AE_AML_INVALID_RESOURCE_TYPE);
113 }
114
87 /* Convert the AML byte stream resource to a local resource struct */ 115 /* Convert the AML byte stream resource to a local resource struct */
88 116
89 status = 117 status =
90 acpi_rs_convert_aml_to_resource(resource, 118 acpi_rs_convert_aml_to_resource(resource, aml_resource,
91 ACPI_CAST_PTR(union aml_resource, 119 conversion_table);
92 aml),
93 acpi_gbl_get_resource_dispatch
94 [resource_index]);
95 if (ACPI_FAILURE(status)) { 120 if (ACPI_FAILURE(status)) {
96 ACPI_EXCEPTION((AE_INFO, status, 121 ACPI_EXCEPTION((AE_INFO, status,
97 "Could not convert AML resource (Type 0x%X)", 122 "Could not convert AML resource (Type 0x%X)",
@@ -106,7 +131,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
106 131
107 /* Point to the next structure in the output buffer */ 132 /* Point to the next structure in the output buffer */
108 133
109 *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length); 134 *resource_ptr = ACPI_NEXT_RESOURCE(resource);
110 return_ACPI_STATUS(AE_OK); 135 return_ACPI_STATUS(AE_OK);
111} 136}
112 137
@@ -135,6 +160,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
135{ 160{
136 u8 *aml = output_buffer; 161 u8 *aml = output_buffer;
137 u8 *end_aml = output_buffer + aml_size_needed; 162 u8 *end_aml = output_buffer + aml_size_needed;
163 struct acpi_rsconvert_info *conversion_table;
138 acpi_status status; 164 acpi_status status;
139 165
140 ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml); 166 ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
@@ -154,11 +180,34 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
154 180
155 /* Perform the conversion */ 181 /* Perform the conversion */
156 182
157 status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union 183 if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
158 aml_resource, 184 if (resource->data.common_serial_bus.type >
159 aml), 185 AML_RESOURCE_MAX_SERIALBUSTYPE) {
160 acpi_gbl_set_resource_dispatch 186 conversion_table = NULL;
161 [resource->type]); 187 } else {
188 /* This is an I2C, SPI, or UART serial_bus descriptor */
189
190 conversion_table =
191 acpi_gbl_convert_resource_serial_bus_dispatch
192 [resource->data.common_serial_bus.type];
193 }
194 } else {
195 conversion_table =
196 acpi_gbl_set_resource_dispatch[resource->type];
197 }
198
199 if (!conversion_table) {
200 ACPI_ERROR((AE_INFO,
201 "Invalid/unsupported resource descriptor: Type 0x%2.2X",
202 resource->type));
203 return (AE_AML_INVALID_RESOURCE_TYPE);
204 }
205
206 status = acpi_rs_convert_resource_to_aml(resource,
207 ACPI_CAST_PTR(union
208 aml_resource,
209 aml),
210 conversion_table);
162 if (ACPI_FAILURE(status)) { 211 if (ACPI_FAILURE(status)) {
163 ACPI_EXCEPTION((AE_INFO, status, 212 ACPI_EXCEPTION((AE_INFO, status,
164 "Could not convert resource (type 0x%X) to AML", 213 "Could not convert resource (type 0x%X) to AML",
@@ -192,9 +241,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
192 241
193 /* Point to the next input resource descriptor */ 242 /* Point to the next input resource descriptor */
194 243
195 resource = 244 resource = ACPI_NEXT_RESOURCE(resource);
196 ACPI_ADD_PTR(struct acpi_resource, resource,
197 resource->length);
198 } 245 }
199 246
200 /* Completed buffer, but did not find an end_tag resource descriptor */ 247 /* Completed buffer, but did not find an end_tag resource descriptor */
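
Both walkers above now advance with ACPI_NEXT_RESOURCE() in place of the open-coded pointer bump. Judging from the code each hunk removes, the macro adds resource->length, already rounded to native-word alignment, to the current pointer:

#include <stdint.h>

struct acpi_resource_hdr {	/* minimal stand-in                      */
	uint32_t type;
	uint32_t length;	/* total size of this element, aligned   */
};

/* Equivalent of the ACPI_ADD_PTR(...) expression the hunks remove. */
#define NEXT_RESOURCE(res) \
	((struct acpi_resource_hdr *)((uint8_t *)(res) + (res)->length))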
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 7cc6d8625f1..4fd611ad02b 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 410264b22a2..8073b371cc7 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
83 83
84 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource); 84 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
85 85
86 if (!info) {
87 return_ACPI_STATUS(AE_BAD_PARAMETER);
88 }
89
86 if (((acpi_size) resource) & 0x3) { 90 if (((acpi_size) resource) & 0x3) {
87 91
88 /* Each internal resource struct is expected to be 32-bit aligned */ 92 /* Each internal resource struct is expected to be 32-bit aligned */
@@ -101,7 +105,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
101 * table length (# of table entries) 105 * table length (# of table entries)
102 */ 106 */
103 count = INIT_TABLE_LENGTH(info); 107 count = INIT_TABLE_LENGTH(info);
104
105 while (count) { 108 while (count) {
106 /* 109 /*
107 * Source is the external AML byte stream buffer, 110 * Source is the external AML byte stream buffer,
@@ -145,6 +148,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
145 ((ACPI_GET8(source) >> info->value) & 0x03); 148 ((ACPI_GET8(source) >> info->value) & 0x03);
146 break; 149 break;
147 150
151 case ACPI_RSC_3BITFLAG:
152 /*
153 * Mask and shift the flag bits
154 */
155 ACPI_SET8(destination) = (u8)
156 ((ACPI_GET8(source) >> info->value) & 0x07);
157 break;
158
148 case ACPI_RSC_COUNT: 159 case ACPI_RSC_COUNT:
149 160
150 item_count = ACPI_GET8(source); 161 item_count = ACPI_GET8(source);
@@ -163,6 +174,69 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
163 (info->value * (item_count - 1)); 174 (info->value * (item_count - 1));
164 break; 175 break;
165 176
177 case ACPI_RSC_COUNT_GPIO_PIN:
178
179 target = ACPI_ADD_PTR(void, aml, info->value);
180 item_count = ACPI_GET16(target) - ACPI_GET16(source);
181
182 resource->length = resource->length + item_count;
183 item_count = item_count / 2;
184 ACPI_SET16(destination) = item_count;
185 break;
186
187 case ACPI_RSC_COUNT_GPIO_VEN:
188
189 item_count = ACPI_GET8(source);
190 ACPI_SET8(destination) = (u8)item_count;
191
192 resource->length = resource->length +
193 (info->value * item_count);
194 break;
195
196 case ACPI_RSC_COUNT_GPIO_RES:
197
198 /*
199 * Vendor data is optional (length/offset may both be zero)
200 * Examine vendor data length field first
201 */
202 target = ACPI_ADD_PTR(void, aml, (info->value + 2));
203 if (ACPI_GET16(target)) {
204
205 /* Use vendor offset to get resource source length */
206
207 target = ACPI_ADD_PTR(void, aml, info->value);
208 item_count =
209 ACPI_GET16(target) - ACPI_GET16(source);
210 } else {
211 /* No vendor data to worry about */
212
213 item_count = aml->large_header.resource_length +
214 sizeof(struct aml_resource_large_header) -
215 ACPI_GET16(source);
216 }
217
218 resource->length = resource->length + item_count;
219 ACPI_SET16(destination) = item_count;
220 break;
221
222 case ACPI_RSC_COUNT_SERIAL_VEN:
223
224 item_count = ACPI_GET16(source) - info->value;
225
226 resource->length = resource->length + item_count;
227 ACPI_SET16(destination) = item_count;
228 break;
229
230 case ACPI_RSC_COUNT_SERIAL_RES:
231
232 item_count = (aml_resource_length +
233 sizeof(struct aml_resource_large_header))
234 - ACPI_GET16(source) - info->value;
235
236 resource->length = resource->length + item_count;
237 ACPI_SET16(destination) = item_count;
238 break;
239
166 case ACPI_RSC_LENGTH: 240 case ACPI_RSC_LENGTH:
167 241
168 resource->length = resource->length + info->value; 242 resource->length = resource->length + info->value;
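
Each new count opcode above recovers an item count from offsets inside the AML image. For ACPI_RSC_COUNT_GPIO_PIN, the pin table runs from pin_table_offset up to res_source_offset at two bytes per pin, so offsets of 23 and 31, say, give 8 bytes and therefore 4 pins. The same arithmetic as a helper:

#include <stdint.h>

/* Pin count from the two offsets in a GPIO AML descriptor (see the
 * ACPI_RSC_COUNT_GPIO_PIN case above): the pin table spans from
 * pin_table_offset to res_source_offset, two bytes per pin. */
static uint16_t gpio_pin_count(uint16_t pin_table_offset,
			       uint16_t res_source_offset)
{
	uint16_t table_bytes = res_source_offset - pin_table_offset;

	return table_bytes / 2;	/* e.g. offsets 23 and 31 -> 4 pins */
}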
@@ -183,6 +257,72 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
183 info->opcode); 257 info->opcode);
184 break; 258 break;
185 259
260 case ACPI_RSC_MOVE_GPIO_PIN:
261
262 /* Generate and set the PIN data pointer */
263
264 target = (char *)ACPI_ADD_PTR(void, resource,
265 (resource->length -
266 item_count * 2));
267 *(u16 **)destination = ACPI_CAST_PTR(u16, target);
268
269 /* Copy the PIN data */
270
271 source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
272 acpi_rs_move_data(target, source, item_count,
273 info->opcode);
274 break;
275
276 case ACPI_RSC_MOVE_GPIO_RES:
277
278 /* Generate and set the resource_source string pointer */
279
280 target = (char *)ACPI_ADD_PTR(void, resource,
281 (resource->length -
282 item_count));
283 *(u8 **)destination = ACPI_CAST_PTR(u8, target);
284
285 /* Copy the resource_source string */
286
287 source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
288 acpi_rs_move_data(target, source, item_count,
289 info->opcode);
290 break;
291
292 case ACPI_RSC_MOVE_SERIAL_VEN:
293
294 /* Generate and set the Vendor Data pointer */
295
296 target = (char *)ACPI_ADD_PTR(void, resource,
297 (resource->length -
298 item_count));
299 *(u8 **)destination = ACPI_CAST_PTR(u8, target);
300
301 /* Copy the Vendor Data */
302
303 source = ACPI_ADD_PTR(void, aml, info->value);
304 acpi_rs_move_data(target, source, item_count,
305 info->opcode);
306 break;
307
308 case ACPI_RSC_MOVE_SERIAL_RES:
309
310 /* Generate and set the resource_source string pointer */
311
312 target = (char *)ACPI_ADD_PTR(void, resource,
313 (resource->length -
314 item_count));
315 *(u8 **)destination = ACPI_CAST_PTR(u8, target);
316
317 /* Copy the resource_source string */
318
319 source =
320 ACPI_ADD_PTR(void, aml,
321 (ACPI_GET16(source) + info->value));
322 acpi_rs_move_data(target, source, item_count,
323 info->opcode);
324 break;
325
186 case ACPI_RSC_SET8: 326 case ACPI_RSC_SET8:
187 327
188 ACPI_MEMSET(destination, info->aml_offset, info->value); 328 ACPI_MEMSET(destination, info->aml_offset, info->value);
@@ -219,13 +359,18 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
219 * Optional resource_source (Index and String). This is the more 359 * Optional resource_source (Index and String). This is the more
220 * complicated case used by the Interrupt() macro 360 * complicated case used by the Interrupt() macro
221 */ 361 */
222 target = 362 target = ACPI_ADD_PTR(char, resource,
223 ACPI_ADD_PTR(char, resource, 363 info->aml_offset +
224 info->aml_offset + (item_count * 4)); 364 (item_count * 4));
225 365
226 resource->length += 366 resource->length +=
227 acpi_rs_get_resource_source(aml_resource_length, 367 acpi_rs_get_resource_source(aml_resource_length,
228 (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target); 368 (acpi_rs_length)
369 (((item_count -
370 1) * sizeof(u32)) +
371 info->value),
372 destination, aml,
373 target);
229 break; 374 break;
230 375
231 case ACPI_RSC_BITMASK: 376 case ACPI_RSC_BITMASK:
@@ -327,6 +472,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
327{ 472{
328 void *source = NULL; 473 void *source = NULL;
329 void *destination; 474 void *destination;
475 char *target;
330 acpi_rsdesc_size aml_length = 0; 476 acpi_rsdesc_size aml_length = 0;
331 u8 count; 477 u8 count;
332 u16 temp16 = 0; 478 u16 temp16 = 0;
@@ -334,6 +480,10 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
334 480
335 ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml); 481 ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
336 482
483 if (!info) {
484 return_ACPI_STATUS(AE_BAD_PARAMETER);
485 }
486
337 /* 487 /*
338 * First table entry must be ACPI_RSC_INITxxx and must contain the 488 * First table entry must be ACPI_RSC_INITxxx and must contain the
339 * table length (# of table entries) 489 * table length (# of table entries)
@@ -383,6 +533,14 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
383 ((ACPI_GET8(source) & 0x03) << info->value); 533 ((ACPI_GET8(source) & 0x03) << info->value);
384 break; 534 break;
385 535
536 case ACPI_RSC_3BITFLAG:
537 /*
538 * Mask and shift the flag bits
539 */
540 ACPI_SET8(destination) |= (u8)
541 ((ACPI_GET8(source) & 0x07) << info->value);
542 break;
543
386 case ACPI_RSC_COUNT: 544 case ACPI_RSC_COUNT:
387 545
388 item_count = ACPI_GET8(source); 546 item_count = ACPI_GET8(source);
@@ -400,6 +558,63 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
400 acpi_rs_set_resource_length(aml_length, aml); 558 acpi_rs_set_resource_length(aml_length, aml);
401 break; 559 break;
402 560
561 case ACPI_RSC_COUNT_GPIO_PIN:
562
563 item_count = ACPI_GET16(source);
564 ACPI_SET16(destination) = (u16)aml_length;
565
566 aml_length = (u16)(aml_length + item_count * 2);
567 target = ACPI_ADD_PTR(void, aml, info->value);
568 ACPI_SET16(target) = (u16)aml_length;
569 acpi_rs_set_resource_length(aml_length, aml);
570 break;
571
572 case ACPI_RSC_COUNT_GPIO_VEN:
573
574 item_count = ACPI_GET16(source);
575 ACPI_SET16(destination) = (u16)item_count;
576
577 aml_length =
578 (u16)(aml_length + (info->value * item_count));
579 acpi_rs_set_resource_length(aml_length, aml);
580 break;
581
582 case ACPI_RSC_COUNT_GPIO_RES:
583
584 /* Set resource source string length */
585
586 item_count = ACPI_GET16(source);
587 ACPI_SET16(destination) = (u16)aml_length;
588
589 /* Compute offset for the Vendor Data */
590
591 aml_length = (u16)(aml_length + item_count);
592 target = ACPI_ADD_PTR(void, aml, info->value);
593
594 /* Set vendor offset only if there is vendor data */
595
596 if (resource->data.gpio.vendor_length) {
597 ACPI_SET16(target) = (u16)aml_length;
598 }
599
600 acpi_rs_set_resource_length(aml_length, aml);
601 break;
602
603 case ACPI_RSC_COUNT_SERIAL_VEN:
604
605 item_count = ACPI_GET16(source);
606 ACPI_SET16(destination) = item_count + info->value;
607 aml_length = (u16)(aml_length + item_count);
608 acpi_rs_set_resource_length(aml_length, aml);
609 break;
610
611 case ACPI_RSC_COUNT_SERIAL_RES:
612
613 item_count = ACPI_GET16(source);
614 aml_length = (u16)(aml_length + item_count);
615 acpi_rs_set_resource_length(aml_length, aml);
616 break;
617
403 case ACPI_RSC_LENGTH: 618 case ACPI_RSC_LENGTH:
404 619
405 acpi_rs_set_resource_length(info->value, aml); 620 acpi_rs_set_resource_length(info->value, aml);
@@ -417,6 +632,48 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
417 info->opcode); 632 info->opcode);
418 break; 633 break;
419 634
635 case ACPI_RSC_MOVE_GPIO_PIN:
636
637 destination = (char *)ACPI_ADD_PTR(void, aml,
638 ACPI_GET16
639 (destination));
640 source = *(u16 **)source;
641 acpi_rs_move_data(destination, source, item_count,
642 info->opcode);
643 break;
644
645 case ACPI_RSC_MOVE_GPIO_RES:
646
647 /* Used for both resource_source string and vendor_data */
648
649 destination = (char *)ACPI_ADD_PTR(void, aml,
650 ACPI_GET16
651 (destination));
652 source = *(u8 **)source;
653 acpi_rs_move_data(destination, source, item_count,
654 info->opcode);
655 break;
656
657 case ACPI_RSC_MOVE_SERIAL_VEN:
658
659 destination = (char *)ACPI_ADD_PTR(void, aml,
660 (aml_length -
661 item_count));
662 source = *(u8 **)source;
663 acpi_rs_move_data(destination, source, item_count,
664 info->opcode);
665 break;
666
667 case ACPI_RSC_MOVE_SERIAL_RES:
668
669 destination = (char *)ACPI_ADD_PTR(void, aml,
670 (aml_length -
671 item_count));
672 source = *(u8 **)source;
673 acpi_rs_move_data(destination, source, item_count,
674 info->opcode);
675 break;
676
420 case ACPI_RSC_ADDRESS: 677 case ACPI_RSC_ADDRESS:
421 678
422 /* Set the Resource Type, General Flags, and Type-Specific Flags */ 679 /* Set the Resource Type, General Flags, and Type-Specific Flags */
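
In the resource-to-AML direction, table order matters: ACPI_RSC_COUNT_GPIO_PIN first writes the pin-table offset into the AML header, and ACPI_RSC_MOVE_GPIO_PIN later reads that offset back to place the pins. A sketch of the second step, little-endian host assumed:

#include <stdint.h>
#include <string.h>

/* Set direction for MOVE_GPIO_PIN: the COUNT opcode already stored
 * the pin-table offset in the AML image; follow it and copy the pins
 * out of the internal struct. */
static void set_gpio_pins(uint8_t *aml, uint16_t pin_table_offset,
			  const uint16_t *pins, uint16_t pin_count)
{
	memcpy(aml + pin_table_offset, pins,
	       (size_t)pin_count * sizeof(uint16_t));
}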
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
new file mode 100644
index 00000000000..9aa5e689b44
--- /dev/null
+++ b/drivers/acpi/acpica/rsserial.c
@@ -0,0 +1,441 @@
1/*******************************************************************************
2 *
3 * Module Name: rsserial - GPIO/serial_bus resource descriptors
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acresrc.h"
47
48#define _COMPONENT ACPI_RESOURCES
49ACPI_MODULE_NAME("rsserial")
50
51/*******************************************************************************
52 *
53 * acpi_rs_convert_gpio
54 *
55 ******************************************************************************/
56struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
57 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
58 ACPI_RS_SIZE(struct acpi_resource_gpio),
59 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
60
61 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO,
62 sizeof(struct aml_resource_gpio),
63 0},
64
65 /*
66 * These fields are contiguous in both the source and destination:
67 * revision_id
68 * connection_type
69 */
70 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id),
71 AML_OFFSET(gpio.revision_id),
72 2},
73
74 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer),
75 AML_OFFSET(gpio.flags),
76 0},
77
78 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
79 AML_OFFSET(gpio.int_flags),
80 3},
81
82 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
83 AML_OFFSET(gpio.int_flags),
84 0},
85
86 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering),
87 AML_OFFSET(gpio.int_flags),
88 0},
89
90 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity),
91 AML_OFFSET(gpio.int_flags),
92 1},
93
94 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config),
95 AML_OFFSET(gpio.pin_config),
96 1},
97
98 /*
99 * These fields are contiguous in both the source and destination:
100 * drive_strength
101 * debounce_timeout
102 */
103 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength),
104 AML_OFFSET(gpio.drive_strength),
105 2},
106
107 /* Pin Table */
108
109 {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length),
110 AML_OFFSET(gpio.pin_table_offset),
111 AML_OFFSET(gpio.res_source_offset)},
112
113 {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table),
114 AML_OFFSET(gpio.pin_table_offset),
115 0},
116
117 /* Resource Source */
118
119 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index),
120 AML_OFFSET(gpio.res_source_index),
121 1},
122
123 {ACPI_RSC_COUNT_GPIO_RES,
124 ACPI_RS_OFFSET(data.gpio.resource_source.string_length),
125 AML_OFFSET(gpio.res_source_offset),
126 AML_OFFSET(gpio.vendor_offset)},
127
128 {ACPI_RSC_MOVE_GPIO_RES,
129 ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr),
130 AML_OFFSET(gpio.res_source_offset),
131 0},
132
133 /* Vendor Data */
134
135 {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length),
136 AML_OFFSET(gpio.vendor_length),
137 1},
138
139 {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data),
140 AML_OFFSET(gpio.vendor_offset),
141 0},
142};
143
144/*******************************************************************************
145 *
146 * acpi_rs_convert_i2c_serial_bus
147 *
148 ******************************************************************************/
149
150struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
151 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
152 ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
153 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
154
155 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
156 sizeof(struct aml_resource_i2c_serialbus),
157 0},
158
159 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
160 AML_OFFSET(common_serial_bus.revision_id),
161 1},
162
163 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
164 AML_OFFSET(common_serial_bus.type),
165 1},
166
167 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
168 AML_OFFSET(common_serial_bus.flags),
169 0},
170
171 {ACPI_RSC_1BITFLAG,
172 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
173 AML_OFFSET(common_serial_bus.flags),
174 1},
175
176 {ACPI_RSC_MOVE8,
177 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
178 AML_OFFSET(common_serial_bus.type_revision_id),
179 1},
180
181 {ACPI_RSC_MOVE16,
182 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
183 AML_OFFSET(common_serial_bus.type_data_length),
184 1},
185
186 /* Vendor data */
187
188 {ACPI_RSC_COUNT_SERIAL_VEN,
189 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
190 AML_OFFSET(common_serial_bus.type_data_length),
191 AML_RESOURCE_I2C_MIN_DATA_LEN},
192
193 {ACPI_RSC_MOVE_SERIAL_VEN,
194 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
195 0,
196 sizeof(struct aml_resource_i2c_serialbus)},
197
198 /* Resource Source */
199
200 {ACPI_RSC_MOVE8,
201 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
202 AML_OFFSET(common_serial_bus.res_source_index),
203 1},
204
205 {ACPI_RSC_COUNT_SERIAL_RES,
206 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
207 AML_OFFSET(common_serial_bus.type_data_length),
208 sizeof(struct aml_resource_common_serialbus)},
209
210 {ACPI_RSC_MOVE_SERIAL_RES,
211 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
212 AML_OFFSET(common_serial_bus.type_data_length),
213 sizeof(struct aml_resource_common_serialbus)},
214
215 /* I2C bus type specific */
216
217 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode),
218 AML_OFFSET(i2c_serial_bus.type_specific_flags),
219 0},
220
221 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed),
222 AML_OFFSET(i2c_serial_bus.connection_speed),
223 1},
224
225 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address),
226 AML_OFFSET(i2c_serial_bus.slave_address),
227 1},
228};
229
230/*******************************************************************************
231 *
232 * acpi_rs_convert_spi_serial_bus
233 *
234 ******************************************************************************/
235
236struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
237 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
238 ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
239 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
240
241 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
242 sizeof(struct aml_resource_spi_serialbus),
243 0},
244
245 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
246 AML_OFFSET(common_serial_bus.revision_id),
247 1},
248
249 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
250 AML_OFFSET(common_serial_bus.type),
251 1},
252
253 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
254 AML_OFFSET(common_serial_bus.flags),
255 0},
256
257 {ACPI_RSC_1BITFLAG,
258 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
259 AML_OFFSET(common_serial_bus.flags),
260 1},
261
262 {ACPI_RSC_MOVE8,
263 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
264 AML_OFFSET(common_serial_bus.type_revision_id),
265 1},
266
267 {ACPI_RSC_MOVE16,
268 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
269 AML_OFFSET(common_serial_bus.type_data_length),
270 1},
271
272 /* Vendor data */
273
274 {ACPI_RSC_COUNT_SERIAL_VEN,
275 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
276 AML_OFFSET(common_serial_bus.type_data_length),
277 AML_RESOURCE_SPI_MIN_DATA_LEN},
278
279 {ACPI_RSC_MOVE_SERIAL_VEN,
280 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
281 0,
282 sizeof(struct aml_resource_spi_serialbus)},
283
284 /* Resource Source */
285
286 {ACPI_RSC_MOVE8,
287 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
288 AML_OFFSET(common_serial_bus.res_source_index),
289 1},
290
291 {ACPI_RSC_COUNT_SERIAL_RES,
292 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
293 AML_OFFSET(common_serial_bus.type_data_length),
294 sizeof(struct aml_resource_common_serialbus)},
295
296 {ACPI_RSC_MOVE_SERIAL_RES,
297 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
298 AML_OFFSET(common_serial_bus.type_data_length),
299 sizeof(struct aml_resource_common_serialbus)},
300
301 /* Spi bus type specific */
302
303 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode),
304 AML_OFFSET(spi_serial_bus.type_specific_flags),
305 0},
306
307 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity),
308 AML_OFFSET(spi_serial_bus.type_specific_flags),
309 1},
310
311 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length),
312 AML_OFFSET(spi_serial_bus.data_bit_length),
313 1},
314
315 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase),
316 AML_OFFSET(spi_serial_bus.clock_phase),
317 1},
318
319 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity),
320 AML_OFFSET(spi_serial_bus.clock_polarity),
321 1},
322
323 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection),
324 AML_OFFSET(spi_serial_bus.device_selection),
325 1},
326
327 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed),
328 AML_OFFSET(spi_serial_bus.connection_speed),
329 1},
330};
331
332/*******************************************************************************
333 *
334 * acpi_rs_convert_uart_serial_bus
335 *
336 ******************************************************************************/
337
338struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
339 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
340 ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
341 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
342
343 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
344 sizeof(struct aml_resource_uart_serialbus),
345 0},
346
347 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
348 AML_OFFSET(common_serial_bus.revision_id),
349 1},
350
351 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
352 AML_OFFSET(common_serial_bus.type),
353 1},
354
355 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
356 AML_OFFSET(common_serial_bus.flags),
357 0},
358
359 {ACPI_RSC_1BITFLAG,
360 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
361 AML_OFFSET(common_serial_bus.flags),
362 1},
363
364 {ACPI_RSC_MOVE8,
365 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
366 AML_OFFSET(common_serial_bus.type_revision_id),
367 1},
368
369 {ACPI_RSC_MOVE16,
370 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
371 AML_OFFSET(common_serial_bus.type_data_length),
372 1},
373
374 /* Vendor data */
375
376 {ACPI_RSC_COUNT_SERIAL_VEN,
377 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
378 AML_OFFSET(common_serial_bus.type_data_length),
379 AML_RESOURCE_UART_MIN_DATA_LEN},
380
381 {ACPI_RSC_MOVE_SERIAL_VEN,
382 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
383 0,
384 sizeof(struct aml_resource_uart_serialbus)},
385
386 /* Resource Source */
387
388 {ACPI_RSC_MOVE8,
389 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
390 AML_OFFSET(common_serial_bus.res_source_index),
391 1},
392
393 {ACPI_RSC_COUNT_SERIAL_RES,
394 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
395 AML_OFFSET(common_serial_bus.type_data_length),
396 sizeof(struct aml_resource_common_serialbus)},
397
398 {ACPI_RSC_MOVE_SERIAL_RES,
399 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
400 AML_OFFSET(common_serial_bus.type_data_length),
401 sizeof(struct aml_resource_common_serialbus)},
402
403 /* Uart bus type specific */
404
405 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control),
406 AML_OFFSET(uart_serial_bus.type_specific_flags),
407 0},
408
409 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits),
410 AML_OFFSET(uart_serial_bus.type_specific_flags),
411 2},
412
413 {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits),
414 AML_OFFSET(uart_serial_bus.type_specific_flags),
415 4},
416
417 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian),
418 AML_OFFSET(uart_serial_bus.type_specific_flags),
419 7},
420
421 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity),
422 AML_OFFSET(uart_serial_bus.parity),
423 1},
424
425 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled),
426 AML_OFFSET(uart_serial_bus.lines_enabled),
427 1},
428
429 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size),
430 AML_OFFSET(uart_serial_bus.rx_fifo_size),
431 1},
432
433 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size),
434 AML_OFFSET(uart_serial_bus.tx_fifo_size),
435 1},
436
437 {ACPI_RSC_MOVE32,
438 ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate),
439 AML_OFFSET(uart_serial_bus.default_baud_rate),
440 1},
441};
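The tables above are pure data: each acpi_rsconvert_info entry packs an opcode, a destination offset into the internal acpi_resource, a source offset into the raw AML descriptor, and an opcode-specific value (an item count or a bit position), and one generic walker in rsmisc.c interprets the entries in order for both directions of the conversion. Below is a minimal, self-contained sketch of that table-driven idea — the struct layout, opcode names, and walker are simplified stand-ins for illustration, not the ACPICA definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the conversion machinery (hypothetical names) */
enum { RSC_MOVE8, RSC_MOVE16, RSC_1BITFLAG };

struct rsconvert_info {
	uint8_t  opcode;      /* what to do for this entry */
	uint16_t dst_offset;  /* offset into the internal resource */
	uint16_t src_offset;  /* offset into the raw AML descriptor */
	uint8_t  value;       /* item count or bit position */
};

/* Walk one table, converting an AML byte stream into an internal struct */
static void convert(uint8_t *dst, const uint8_t *src,
		    const struct rsconvert_info *info, int count)
{
	for (int i = 0; i < count; i++, info++) {
		uint8_t *d = dst + info->dst_offset;
		const uint8_t *s = src + info->src_offset;

		switch (info->opcode) {
		case RSC_MOVE8:		/* copy 'value' contiguous bytes */
			memcpy(d, s, info->value);
			break;
		case RSC_MOVE16:	/* copy 16-bit items byte-wise; host-endian
					 * (ACPICA also swaps on big-endian) */
			memcpy(d, s, (size_t)info->value * 2);
			break;
		case RSC_1BITFLAG:	/* extract a single flag bit */
			*d = (uint8_t)((*s >> info->value) & 0x01);
			break;
		}
	}
}

struct internal { uint8_t rev; uint8_t producer; uint16_t strength; };

int main(void)
{
	const uint8_t aml[] = { 0x02, 0x01, 0x34, 0x12 }; /* rev, flags, strength */
	const struct rsconvert_info table[] = {
		{ RSC_MOVE8,    offsetof(struct internal, rev),      0, 1 },
		{ RSC_1BITFLAG, offsetof(struct internal, producer), 1, 0 },
		{ RSC_MOVE16,   offsetof(struct internal, strength), 2, 1 },
	};
	struct internal res = { 0 };

	convert((uint8_t *)&res, aml, table, 3);
	printf("rev=%u producer=%u strength=0x%x\n",
	       res.rev, res.producer, res.strength);
	return 0;
}

Adding a new descriptor such as GpioIo then costs only a new table like acpi_rs_convert_gpio above, plus opcode handlers for any genuinely new move semantics (the pin-table and resource-source moves in this patch).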
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 231811e5693..433a375deb9 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,9 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
144 * since there are no alignment or endian issues 144 * since there are no alignment or endian issues
145 */ 145 */
146 case ACPI_RSC_MOVE8: 146 case ACPI_RSC_MOVE8:
147 case ACPI_RSC_MOVE_GPIO_RES:
148 case ACPI_RSC_MOVE_SERIAL_VEN:
149 case ACPI_RSC_MOVE_SERIAL_RES:
147 ACPI_MEMCPY(destination, source, item_count); 150 ACPI_MEMCPY(destination, source, item_count);
148 return; 151 return;
149 152
@@ -153,6 +156,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
153 * misaligned memory transfers 156 * misaligned memory transfers
154 */ 157 */
155 case ACPI_RSC_MOVE16: 158 case ACPI_RSC_MOVE16:
159 case ACPI_RSC_MOVE_GPIO_PIN:
156 ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i], 160 ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
157 &ACPI_CAST_PTR(u16, source)[i]); 161 &ACPI_CAST_PTR(u16, source)[i]);
158 break; 162 break;
@@ -590,6 +594,56 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
590 594
591/******************************************************************************* 595/*******************************************************************************
592 * 596 *
597 * FUNCTION: acpi_rs_get_aei_method_data
598 *
599 * PARAMETERS: Node - Device node
600 * ret_buffer - Pointer to a buffer structure for the
601 * results
602 *
603 * RETURN: Status
604 *
 605 * DESCRIPTION: This function is called to get the _AEI resources for the
 606 * device object specified by the handle passed in
 607 *
 608 * If the function fails, an appropriate status will be returned
 609 * and the contents of the caller's buffer are undefined.
610 *
611 ******************************************************************************/
612
613acpi_status
614acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
615 struct acpi_buffer *ret_buffer)
616{
617 union acpi_operand_object *obj_desc;
618 acpi_status status;
619
620 ACPI_FUNCTION_TRACE(rs_get_aei_method_data);
621
622 /* Parameters guaranteed valid by caller */
623
624 /* Execute the method, no parameters */
625
626 status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI,
627 ACPI_BTYPE_BUFFER, &obj_desc);
628 if (ACPI_FAILURE(status)) {
629 return_ACPI_STATUS(status);
630 }
631
632 /*
633 * Make the call to create a resource linked list from the
 634 * byte stream buffer that comes back from the _AEI method
635 * execution.
636 */
637 status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
638
639 /* On exit, we must delete the object returned by evaluate_object */
640
641 acpi_ut_remove_reference(obj_desc);
642 return_ACPI_STATUS(status);
643}
644
645/*******************************************************************************
646 *
593 * FUNCTION: acpi_rs_get_method_data 647 * FUNCTION: acpi_rs_get_method_data
594 * 648 *
595 * PARAMETERS: Handle - Handle to the containing object 649 * PARAMETERS: Handle - Handle to the containing object
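In the acpi_rs_move_data dispatch added above, the new move types sort by element width: ACPI_RSC_MOVE_GPIO_RES, ACPI_RSC_MOVE_SERIAL_VEN, and ACPI_RSC_MOVE_SERIAL_RES are plain byte copies (strings and vendor data), while ACPI_RSC_MOVE_GPIO_PIN joins ACPI_RSC_MOVE16 because pin-table entries are 16-bit values that may sit misaligned inside the AML stream. A small stand-alone sketch of the alignment-safe per-element copy that ACPI_MOVE_16_TO_16 performs — move16 here is a hypothetical helper, and it omits the byte swap ACPICA adds on big-endian hosts:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy 'count' 16-bit items byte-wise so that unaligned source or
 * destination pointers never trigger an aligned load/store. */
static void move16(void *dst, const void *src, uint16_t count)
{
	uint8_t *d = dst;
	const uint8_t *s = src;

	for (uint16_t i = 0; i < count; i++) {
		/* one u16 as two bytes; safe on strict-alignment machines */
		memcpy(d + 2 * i, s + 2 * i, 2);
	}
}

int main(void)
{
	/* pin table starting at an odd offset inside a raw descriptor */
	uint8_t aml[] = { 0x00, 0x05, 0x00, 0x0A, 0x00 };
	uint16_t pins[2];

	move16(pins, aml + 1, 2);
	printf("pin0=%u pin1=%u\n", pins[0], pins[1]);
	return 0;
}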
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index fe86b37b16c..f58c098c7ae 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -307,6 +307,46 @@ acpi_set_current_resources(acpi_handle device_handle,
307 307
308ACPI_EXPORT_SYMBOL(acpi_set_current_resources) 308ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
309 309
310/*******************************************************************************
311 *
312 * FUNCTION: acpi_get_event_resources
313 *
 314 * PARAMETERS: device_handle - Handle to the device object for the
 315 * device we are getting event resources for
 316 * ret_buffer - Pointer to a buffer that will receive
 317 * the event resources for the device
 318 *
 319 * RETURN: Status
 320 *
 321 * DESCRIPTION: This function is called to get the event resources for a
 322 * specific device. The caller must first acquire a handle for
 323 * the desired device. The resource data is returned in the
 324 * buffer pointed to by the ret_buffer variable. Uses the
 325 * _AEI method.
326 *
327 ******************************************************************************/
328acpi_status
329acpi_get_event_resources(acpi_handle device_handle,
330 struct acpi_buffer *ret_buffer)
331{
332 acpi_status status;
333 struct acpi_namespace_node *node;
334
335 ACPI_FUNCTION_TRACE(acpi_get_event_resources);
336
337 /* Validate parameters then dispatch to internal routine */
338
339 status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
340 if (ACPI_FAILURE(status)) {
341 return_ACPI_STATUS(status);
342 }
343
344 status = acpi_rs_get_aei_method_data(node, ret_buffer);
345 return_ACPI_STATUS(status);
346}
347
348ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
349
310/****************************************************************************** 350/******************************************************************************
311 * 351 *
312 * FUNCTION: acpi_resource_to_address64 352 * FUNCTION: acpi_resource_to_address64
@@ -486,8 +526,9 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
486 * 526 *
487 * PARAMETERS: device_handle - Handle to the device object for the 527 * PARAMETERS: device_handle - Handle to the device object for the
488 * device we are querying 528 * device we are querying
489 * Name - Method name of the resources we want 529 * Name - Method name of the resources we want.
490 * (METHOD_NAME__CRS or METHOD_NAME__PRS) 530 * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
531 * METHOD_NAME__AEI)
491 * user_function - Called for each resource 532 * user_function - Called for each resource
492 * Context - Passed to user_function 533 * Context - Passed to user_function
493 * 534 *
@@ -514,11 +555,12 @@ acpi_walk_resources(acpi_handle device_handle,
514 555
515 if (!device_handle || !user_function || !name || 556 if (!device_handle || !user_function || !name ||
516 (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) && 557 (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
517 !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) { 558 !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
559 !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
518 return_ACPI_STATUS(AE_BAD_PARAMETER); 560 return_ACPI_STATUS(AE_BAD_PARAMETER);
519 } 561 }
520 562
521 /* Get the _CRS or _PRS resource list */ 563 /* Get the _CRS/_PRS/_AEI resource list */
522 564
523 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; 565 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
524 status = acpi_rs_get_method_data(device_handle, name, &buffer); 566 status = acpi_rs_get_method_data(device_handle, name, &buffer);
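A caller-side sketch of the new interface, assuming the usual ACPICA buffer convention (ACPI_ALLOCATE_BUFFER asks the subsystem to allocate the result, and the returned list is terminated by an end_tag resource). walk_aei and handle_pin are illustrative names, not part of this patch:

#include <acpi/acpi.h>

/* Hypothetical consumer: walk the _AEI list of 'handle' and hand each
 * GPIO-signaled event pin to a placeholder handler. */
static void walk_aei(acpi_handle handle, void (*handle_pin)(u16 pin))
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_resource *res;
	acpi_status status;

	status = acpi_get_event_resources(handle, &buf);
	if (ACPI_FAILURE(status)) {
		return;
	}

	/* The returned buffer is a resource list ending with an end_tag */
	for (res = buf.pointer; res->type != ACPI_RESOURCE_TYPE_END_TAG;
	     res = ACPI_NEXT_RESOURCE(res)) {
		if (res->type == ACPI_RESOURCE_TYPE_GPIO) {
			handle_pin(res->data.gpio.pin_table[0]);
		}
	}

	ACPI_FREE(buf.pointer);
}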
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 6f5588e62c0..c5d870406f4 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,15 @@ static void acpi_tb_setup_fadt_registers(void);
63 63
64typedef struct acpi_fadt_info { 64typedef struct acpi_fadt_info {
65 char *name; 65 char *name;
66 u8 address64; 66 u16 address64;
67 u8 address32; 67 u16 address32;
68 u8 length; 68 u16 length;
69 u8 default_length; 69 u8 default_length;
70 u8 type; 70 u8 type;
71 71
72} acpi_fadt_info; 72} acpi_fadt_info;
73 73
74#define ACPI_FADT_OPTIONAL 0
74#define ACPI_FADT_REQUIRED 1 75#define ACPI_FADT_REQUIRED 1
75#define ACPI_FADT_SEPARATE_LENGTH 2 76#define ACPI_FADT_SEPARATE_LENGTH 2
76 77
@@ -87,7 +88,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
87 ACPI_FADT_OFFSET(pm1b_event_block), 88 ACPI_FADT_OFFSET(pm1b_event_block),
88 ACPI_FADT_OFFSET(pm1_event_length), 89 ACPI_FADT_OFFSET(pm1_event_length),
89 ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */ 90 ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */
90 0}, 91 ACPI_FADT_OPTIONAL},
91 92
92 {"Pm1aControlBlock", 93 {"Pm1aControlBlock",
93 ACPI_FADT_OFFSET(xpm1a_control_block), 94 ACPI_FADT_OFFSET(xpm1a_control_block),
@@ -101,7 +102,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
101 ACPI_FADT_OFFSET(pm1b_control_block), 102 ACPI_FADT_OFFSET(pm1b_control_block),
102 ACPI_FADT_OFFSET(pm1_control_length), 103 ACPI_FADT_OFFSET(pm1_control_length),
103 ACPI_PM1_REGISTER_WIDTH, 104 ACPI_PM1_REGISTER_WIDTH,
104 0}, 105 ACPI_FADT_OPTIONAL},
105 106
106 {"Pm2ControlBlock", 107 {"Pm2ControlBlock",
107 ACPI_FADT_OFFSET(xpm2_control_block), 108 ACPI_FADT_OFFSET(xpm2_control_block),
@@ -139,7 +140,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
139 140
140typedef struct acpi_fadt_pm_info { 141typedef struct acpi_fadt_pm_info {
141 struct acpi_generic_address *target; 142 struct acpi_generic_address *target;
142 u8 source; 143 u16 source;
143 u8 register_num; 144 u8 register_num;
144 145
145} acpi_fadt_pm_info; 146} acpi_fadt_pm_info;
@@ -253,8 +254,13 @@ void acpi_tb_parse_fadt(u32 table_index)
253 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt, 254 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
254 ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); 255 ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
255 256
256 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs, 257 /* If Hardware Reduced flag is set, there is no FACS */
257 ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS); 258
259 if (!acpi_gbl_reduced_hardware) {
260 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.
261 Xfacs, ACPI_SIG_FACS,
262 ACPI_TABLE_INDEX_FACS);
263 }
258} 264}
259 265
260/******************************************************************************* 266/*******************************************************************************
@@ -277,12 +283,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
277{ 283{
278 /* 284 /*
279 * Check if the FADT is larger than the largest table that we expect 285 * Check if the FADT is larger than the largest table that we expect
280 * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue 286 * (the ACPI 5.0 version). If so, truncate the table, and issue
281 * a warning. 287 * a warning.
282 */ 288 */
283 if (length > sizeof(struct acpi_table_fadt)) { 289 if (length > sizeof(struct acpi_table_fadt)) {
284 ACPI_WARNING((AE_INFO, 290 ACPI_WARNING((AE_INFO,
285 "FADT (revision %u) is longer than ACPI 2.0 version, " 291 "FADT (revision %u) is longer than ACPI 5.0 version, "
286 "truncating length %u to %u", 292 "truncating length %u to %u",
287 table->revision, length, 293 table->revision, length,
288 (u32)sizeof(struct acpi_table_fadt))); 294 (u32)sizeof(struct acpi_table_fadt)));
@@ -297,6 +303,13 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
297 ACPI_MEMCPY(&acpi_gbl_FADT, table, 303 ACPI_MEMCPY(&acpi_gbl_FADT, table,
298 ACPI_MIN(length, sizeof(struct acpi_table_fadt))); 304 ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
299 305
306 /* Take a copy of the Hardware Reduced flag */
307
308 acpi_gbl_reduced_hardware = FALSE;
309 if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) {
310 acpi_gbl_reduced_hardware = TRUE;
311 }
312
300 /* Convert the local copy of the FADT to the common internal format */ 313 /* Convert the local copy of the FADT to the common internal format */
301 314
302 acpi_tb_convert_fadt(); 315 acpi_tb_convert_fadt();
@@ -502,6 +515,12 @@ static void acpi_tb_validate_fadt(void)
502 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt; 515 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
503 } 516 }
504 517
518 /* If Hardware Reduced flag is set, we are all done */
519
520 if (acpi_gbl_reduced_hardware) {
521 return;
522 }
523
505 /* Examine all of the 64-bit extended address fields (X fields) */ 524 /* Examine all of the 64-bit extended address fields (X fields) */
506 525
507 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { 526 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
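The new acpi_gbl_reduced_hardware global is derived once, from bit 20 of the FADT flags, and then consulted wherever fixed-hardware assumptions would otherwise apply: no FACS is installed above, and the 64-bit X-field validation loop is skipped entirely. A trivial stand-in showing the derivation — the flag value matches the ACPI 5.0 definition, the rest is illustrative:

#include <stdint.h>
#include <stdio.h>

#define ACPI_FADT_HW_REDUCED (1u << 20)	/* FADT flags bit 20, ACPI 5.0 */

static int reduced_hardware;

/* Derive the global from the FADT flags field */
static void parse_fadt_flags(uint32_t fadt_flags)
{
	reduced_hardware = (fadt_flags & ACPI_FADT_HW_REDUCED) ? 1 : 0;
}

int main(void)
{
	parse_fadt_flags(ACPI_FADT_HW_REDUCED);
	printf("FACS %s\n", reduced_hardware ? "absent (HW-reduced platform)"
					     : "expected");
	return 0;
}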
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index a55cb2bb5ab..4903e36ea75 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 62365f6075d..1aecf7baa4e 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0f2d395feab..09ca39e1433 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -135,6 +135,13 @@ acpi_status acpi_tb_initialize_facs(void)
135{ 135{
136 acpi_status status; 136 acpi_status status;
137 137
138 /* If Hardware Reduced flag is set, there is no FACS */
139
140 if (acpi_gbl_reduced_hardware) {
141 acpi_gbl_FACS = NULL;
142 return (AE_OK);
143 }
144
138 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, 145 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
139 ACPI_CAST_INDIRECT_PTR(struct 146 ACPI_CAST_INDIRECT_PTR(struct
140 acpi_table_header, 147 acpi_table_header,
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index e7d13f5d3f2..abcc6412c24 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 7eb6c6cc1ed..4258f647ca3 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
new file mode 100644
index 00000000000..67932aebe6d
--- /dev/null
+++ b/drivers/acpi/acpica/utaddress.c
@@ -0,0 +1,294 @@
1/******************************************************************************
2 *
3 * Module Name: utaddress - op_region address range check
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acnamesp.h"
47
48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utaddress")
50
51/*******************************************************************************
52 *
53 * FUNCTION: acpi_ut_add_address_range
54 *
55 * PARAMETERS: space_id - Address space ID
56 * Address - op_region start address
57 * Length - op_region length
58 * region_node - op_region namespace node
59 *
60 * RETURN: Status
61 *
62 * DESCRIPTION: Add the Operation Region address range to the global list.
63 * The only supported Space IDs are Memory and I/O. Called when
64 * the op_region address/length operands are fully evaluated.
65 *
66 * MUTEX: Locks the namespace
67 *
68 * NOTE: Because this interface is only called when an op_region argument
69 * list is evaluated, there cannot be any duplicate region_nodes.
70 * Duplicate Address/Length values are allowed, however, so that multiple
71 * address conflicts can be detected.
72 *
73 ******************************************************************************/
74acpi_status
75acpi_ut_add_address_range(acpi_adr_space_type space_id,
76 acpi_physical_address address,
77 u32 length, struct acpi_namespace_node *region_node)
78{
79 struct acpi_address_range *range_info;
80 acpi_status status;
81
82 ACPI_FUNCTION_TRACE(ut_add_address_range);
83
84 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
85 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
86 return_ACPI_STATUS(AE_OK);
87 }
88
89 /* Allocate/init a new info block, add it to the appropriate list */
90
91 range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
92 if (!range_info) {
93 return_ACPI_STATUS(AE_NO_MEMORY);
94 }
95
96 range_info->start_address = address;
97 range_info->end_address = (address + length - 1);
98 range_info->region_node = region_node;
99
100 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
101 if (ACPI_FAILURE(status)) {
102 ACPI_FREE(range_info);
103 return_ACPI_STATUS(status);
104 }
105
106 range_info->next = acpi_gbl_address_range_list[space_id];
107 acpi_gbl_address_range_list[space_id] = range_info;
108
109 ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
110 "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
111 acpi_ut_get_node_name(range_info->region_node),
112 ACPI_CAST_PTR(void, address),
113 ACPI_CAST_PTR(void, range_info->end_address)));
114
115 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
116 return_ACPI_STATUS(AE_OK);
117}
118
119/*******************************************************************************
120 *
121 * FUNCTION: acpi_ut_remove_address_range
122 *
123 * PARAMETERS: space_id - Address space ID
124 * region_node - op_region namespace node
125 *
126 * RETURN: None
127 *
128 * DESCRIPTION: Remove the Operation Region from the global list. The only
129 * supported Space IDs are Memory and I/O. Called when an
130 * op_region is deleted.
131 *
132 * MUTEX: Assumes the namespace is locked
133 *
134 ******************************************************************************/
135
136void
137acpi_ut_remove_address_range(acpi_adr_space_type space_id,
138 struct acpi_namespace_node *region_node)
139{
140 struct acpi_address_range *range_info;
141 struct acpi_address_range *prev;
142
143 ACPI_FUNCTION_TRACE(ut_remove_address_range);
144
145 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
146 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
147 return_VOID;
148 }
149
150 /* Get the appropriate list head and check the list */
151
152 range_info = prev = acpi_gbl_address_range_list[space_id];
153 while (range_info) {
154 if (range_info->region_node == region_node) {
155 if (range_info == prev) { /* Found at list head */
156 acpi_gbl_address_range_list[space_id] =
157 range_info->next;
158 } else {
159 prev->next = range_info->next;
160 }
161
162 ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
163 "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
164 acpi_ut_get_node_name(range_info->
165 region_node),
166 ACPI_CAST_PTR(void,
167 range_info->
168 start_address),
169 ACPI_CAST_PTR(void,
170 range_info->
171 end_address)));
172
173 ACPI_FREE(range_info);
174 return_VOID;
175 }
176
177 prev = range_info;
178 range_info = range_info->next;
179 }
180
181 return_VOID;
182}
183
184/*******************************************************************************
185 *
186 * FUNCTION: acpi_ut_check_address_range
187 *
188 * PARAMETERS: space_id - Address space ID
189 * Address - Start address
190 * Length - Length of address range
191 * Warn - TRUE if warning on overlap desired
192 *
193 * RETURN: Count of the number of conflicts detected. Zero is always
194 * returned for Space IDs other than Memory or I/O.
195 *
196 * DESCRIPTION: Check if the input address range overlaps any of the
197 * ASL operation region address ranges. The only supported
198 * Space IDs are Memory and I/O.
199 *
200 * MUTEX: Assumes the namespace is locked.
201 *
202 ******************************************************************************/
203
204u32
205acpi_ut_check_address_range(acpi_adr_space_type space_id,
206 acpi_physical_address address, u32 length, u8 warn)
207{
208 struct acpi_address_range *range_info;
209 acpi_physical_address end_address;
210 char *pathname;
211 u32 overlap_count = 0;
212
213 ACPI_FUNCTION_TRACE(ut_check_address_range);
214
215 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
216 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
217 return_UINT32(0);
218 }
219
220 range_info = acpi_gbl_address_range_list[space_id];
221 end_address = address + length - 1;
222
223 /* Check entire list for all possible conflicts */
224
225 while (range_info) {
226 /*
227 * Check if the requested Address/Length overlaps this address_range.
228 * Four cases to consider:
229 *
230 * 1) Input address/length is contained completely in the address range
231 * 2) Input address/length overlaps range at the range start
232 * 3) Input address/length overlaps range at the range end
233 * 4) Input address/length completely encompasses the range
234 */
235 if ((address <= range_info->end_address) &&
236 (end_address >= range_info->start_address)) {
237
238 /* Found an address range overlap */
239
240 overlap_count++;
241 if (warn) { /* Optional warning message */
242 pathname =
243 acpi_ns_get_external_pathname(range_info->
244 region_node);
245
246 ACPI_WARNING((AE_INFO,
247 "0x%p-0x%p %s conflicts with Region %s %d",
248 ACPI_CAST_PTR(void, address),
249 ACPI_CAST_PTR(void, end_address),
250 acpi_ut_get_region_name(space_id),
251 pathname, overlap_count));
252 ACPI_FREE(pathname);
253 }
254 }
255
256 range_info = range_info->next;
257 }
258
259 return_UINT32(overlap_count);
260}
261
262/*******************************************************************************
263 *
264 * FUNCTION: acpi_ut_delete_address_lists
265 *
266 * PARAMETERS: None
267 *
268 * RETURN: None
269 *
270 * DESCRIPTION: Delete all global address range lists (called during
271 * subsystem shutdown).
272 *
273 ******************************************************************************/
274
275void acpi_ut_delete_address_lists(void)
276{
277 struct acpi_address_range *next;
278 struct acpi_address_range *range_info;
279 int i;
280
281 /* Delete all elements in all address range lists */
282
283 for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
284 next = acpi_gbl_address_range_list[i];
285
286 while (next) {
287 range_info = next;
288 next = range_info->next;
289 ACPI_FREE(range_info);
290 }
291
292 acpi_gbl_address_range_list[i] = NULL;
293 }
294}
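The four overlap cases enumerated in the comment inside acpi_ut_check_address_range all collapse into a single inclusive-interval test: two ranges conflict exactly when each one starts at or before the other ends. A self-contained check of that predicate:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Inclusive-interval overlap, the same comparison applied to each list
 * entry above: (address <= range_end) && (end_address >= range_start) */
static int ranges_overlap(uint64_t a_start, uint64_t a_end,
			  uint64_t b_start, uint64_t b_end)
{
	return (a_start <= b_end) && (a_end >= b_start);
}

int main(void)
{
	assert(ranges_overlap(0x100, 0x1FF, 0x180, 0x280));  /* overlap at end */
	assert(ranges_overlap(0x100, 0x1FF, 0x120, 0x130));  /* contained */
	assert(ranges_overlap(0x100, 0x1FF, 0x000, 0x300));  /* encompassing */
	assert(!ranges_overlap(0x100, 0x1FF, 0x200, 0x2FF)); /* adjacent only */
	puts("predicate covers all four cases from the comment");
	return 0;
}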
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 0a697351cf6..9982d2ea66f 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index aded299a2fa..3317c0a406e 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a1f8d7509e6..a0998a88631 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 8b087e2d64f..d42ede5260c 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -171,7 +171,9 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
171 "SMBus", 171 "SMBus",
172 "SystemCMOS", 172 "SystemCMOS",
173 "PCIBARTarget", 173 "PCIBARTarget",
174 "IPMI" 174 "IPMI",
175 "GeneralPurposeIo",
176 "GenericSerialBus"
175}; 177};
176 178
177char *acpi_ut_get_region_name(u8 space_id) 179char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 31f5a7832ef..2a6c3e18369 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -215,11 +215,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
215 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 215 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
216 "***** Region %p\n", object)); 216 "***** Region %p\n", object));
217 217
218 /* Invalidate the region address/length via the host OS */ 218 /*
219 219 * Update address_range list. However, only permanent regions
220 acpi_os_invalidate_address(object->region.space_id, 220 * are installed in this list. (Not created within a method)
221 object->region.address, 221 */
222 (acpi_size) object->region.length); 222 if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
223 acpi_ut_remove_address_range(object->region.space_id,
224 object->region.node);
225 }
223 226
224 second_desc = acpi_ns_get_secondary_object(object); 227 second_desc = acpi_ns_get_secondary_object(object);
225 if (second_desc) { 228 if (second_desc) {
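Note the asymmetry the new comment calls out: op_regions declared inside a control method are marked ANOBJ_TEMPORARY and never enter the global address-range list, so the deletion path above skips the unlink for them. A condensed sketch of that pairing — the flag value and helpers are illustrative, not the ACPICA code:

#include <stdio.h>

#define ANOBJ_TEMPORARY 0x04	/* illustrative flag value */

struct node { unsigned flags; };

static void on_region_create(struct node *n)
{
	if (!(n->flags & ANOBJ_TEMPORARY))
		printf("add to address-range list\n");
}

static void on_region_delete(struct node *n)
{
	if (!(n->flags & ANOBJ_TEMPORARY))
		printf("remove from address-range list\n");
}

int main(void)
{
	struct node permanent = { 0 };
	struct node method_local = { ANOBJ_TEMPORARY };

	on_region_create(&permanent);	 /* listed */
	on_region_delete(&permanent);	 /* unlisted */
	on_region_create(&method_local); /* skipped both times */
	on_region_delete(&method_local);
	return 0;
}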
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 18f73c9d10b..479f32b3341 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ffba0a39c3e..4153584cf52 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -264,6 +264,12 @@ acpi_status acpi_ut_init_globals(void)
264 return_ACPI_STATUS(status); 264 return_ACPI_STATUS(status);
265 } 265 }
266 266
267 /* Address Range lists */
268
269 for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
270 acpi_gbl_address_range_list[i] = NULL;
271 }
272
267 /* Mutex locked flags */ 273 /* Mutex locked flags */
268 274
269 for (i = 0; i < ACPI_NUM_MUTEX; i++) { 275 for (i = 0; i < ACPI_NUM_MUTEX; i++) {
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index b679ea69354..c92eb1d9378 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 191b6828cce..8359c0c5dc9 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@ static void acpi_ut_terminate(void)
92 gpe_xrupt_info = next_gpe_xrupt_info; 92 gpe_xrupt_info = next_gpe_xrupt_info;
93 } 93 }
94 94
95 acpi_ut_delete_address_lists();
95 return_VOID; 96 return_VOID;
96} 97}
97 98
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index f6bb75c6faf..155fd786d0f 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index ce481da9bb4..2491a552b0e 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c33a852d4f4..86f19db74e0 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 7d797e2baec..43174df3312 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -293,14 +293,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
293 293
294acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) 294acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
295{ 295{
296 acpi_thread_id this_thread_id;
297
298 ACPI_FUNCTION_NAME(ut_release_mutex); 296 ACPI_FUNCTION_NAME(ut_release_mutex);
299 297
300 this_thread_id = acpi_os_get_thread_id();
301
302 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n", 298 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
303 (u32)this_thread_id, 299 (u32)acpi_os_get_thread_id(),
304 acpi_ut_get_mutex_name(mutex_id))); 300 acpi_ut_get_mutex_name(mutex_id)));
305 301
306 if (mutex_id > ACPI_MAX_MUTEX) { 302 if (mutex_id > ACPI_MAX_MUTEX) {
@@ -329,7 +325,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
329 * the ACPI subsystem code. 325 * the ACPI subsystem code.
330 */ 326 */
331 for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { 327 for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
332 if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { 328 if (acpi_gbl_mutex_info[i].thread_id ==
329 acpi_os_get_thread_id()) {
333 if (i == mutex_id) { 330 if (i == mutex_id) {
334 continue; 331 continue;
335 } 332 }
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 188340a017b..b112744fc9a 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 1fb10cb8f11..2360cf70c18 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 6ffd3a8bdaa..9d441ea7030 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h" 45#include "accommon.h"
46#include "amlresrc.h" 46#include "acresrc.h"
47 47
48#define _COMPONENT ACPI_UTILITIES 48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utresrc") 49ACPI_MODULE_NAME("utresrc")
@@ -154,6 +154,138 @@ const char *acpi_gbl_typ_decode[] = {
154 "TypeF" 154 "TypeF"
155}; 155};
156 156
157const char *acpi_gbl_ppc_decode[] = {
158 "PullDefault",
159 "PullUp",
160 "PullDown",
161 "PullNone"
162};
163
164const char *acpi_gbl_ior_decode[] = {
165 "IoRestrictionNone",
166 "IoRestrictionInputOnly",
167 "IoRestrictionOutputOnly",
168 "IoRestrictionNoneAndPreserve"
169};
170
171const char *acpi_gbl_dts_decode[] = {
172 "Width8bit",
173 "Width16bit",
174 "Width32bit",
175 "Width64bit",
176 "Width128bit",
177 "Width256bit",
178};
179
180/* GPIO connection type */
181
182const char *acpi_gbl_ct_decode[] = {
183 "Interrupt",
184 "I/O"
185};
186
187/* Serial bus type */
188
189const char *acpi_gbl_sbt_decode[] = {
190 "/* UNKNOWN serial bus type */",
191 "I2C",
192 "SPI",
193 "UART"
194};
195
196/* I2C serial bus access mode */
197
198const char *acpi_gbl_am_decode[] = {
199 "AddressingMode7Bit",
200 "AddressingMode10Bit"
201};
202
203/* I2C serial bus slave mode */
204
205const char *acpi_gbl_sm_decode[] = {
206 "ControllerInitiated",
207 "DeviceInitiated"
208};
209
210/* SPI serial bus wire mode */
211
212const char *acpi_gbl_wm_decode[] = {
213 "FourWireMode",
214 "ThreeWireMode"
215};
216
217/* SPI serial clock phase */
218
219const char *acpi_gbl_cph_decode[] = {
220 "ClockPhaseFirst",
221 "ClockPhaseSecond"
222};
223
224/* SPI serial bus clock polarity */
225
226const char *acpi_gbl_cpo_decode[] = {
227 "ClockPolarityLow",
228 "ClockPolarityHigh"
229};
230
231/* SPI serial bus device polarity */
232
233const char *acpi_gbl_dp_decode[] = {
234 "PolarityLow",
235 "PolarityHigh"
236};
237
238/* UART serial bus endian */
239
240const char *acpi_gbl_ed_decode[] = {
241 "LittleEndian",
242 "BigEndian"
243};
244
245/* UART serial bus bits per byte */
246
247const char *acpi_gbl_bpb_decode[] = {
248 "DataBitsFive",
249 "DataBitsSix",
250 "DataBitsSeven",
251 "DataBitsEight",
252 "DataBitsNine",
253 "/* UNKNOWN Bits per byte */",
254 "/* UNKNOWN Bits per byte */",
255 "/* UNKNOWN Bits per byte */"
256};
257
258/* UART serial bus stop bits */
259
260const char *acpi_gbl_sb_decode[] = {
261 "StopBitsNone",
262 "StopBitsOne",
263 "StopBitsOnePlusHalf",
264 "StopBitsTwo"
265};
266
267/* UART serial bus flow control */
268
269const char *acpi_gbl_fc_decode[] = {
270 "FlowControlNone",
271 "FlowControlHardware",
272 "FlowControlXON",
273 "/* UNKNOWN flow control keyword */"
274};
275
276/* UART serial bus parity type */
277
278const char *acpi_gbl_pt_decode[] = {
279 "ParityTypeNone",
280 "ParityTypeEven",
281 "ParityTypeOdd",
282 "ParityTypeMark",
283 "ParityTypeSpace",
284 "/* UNKNOWN parity keyword */",
285 "/* UNKNOWN parity keyword */",
286 "/* UNKNOWN parity keyword */"
287};
288
157#endif 289#endif
158 290
159/* 291/*
@@ -173,7 +305,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
173 ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent), 305 ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
174 ACPI_AML_SIZE_SMALL(struct aml_resource_io), 306 ACPI_AML_SIZE_SMALL(struct aml_resource_io),
175 ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io), 307 ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
176 0, 308 ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
177 0, 309 0,
178 0, 310 0,
179 0, 311 0,
@@ -193,7 +325,17 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
193 ACPI_AML_SIZE_LARGE(struct aml_resource_address16), 325 ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
194 ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq), 326 ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
195 ACPI_AML_SIZE_LARGE(struct aml_resource_address64), 327 ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
196 ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64) 328 ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
329 ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
330 0,
331 ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
332};
333
334const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
335 0,
336 ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
337 ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
338 ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
197}; 339};
198 340
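A hedged sketch of how the new per-bus-type size table is meant to be indexed: the SerialBus BusType field (1 = I2C, 2 = SPI, 3 = UART; 0 is invalid) selects the minimum AML descriptor length. The helper name is illustrative, and the caller is assumed to have validated the type first:

	static u8 serial_bus_min_aml_size(union aml_resource *res)
	{
		return acpi_gbl_resource_aml_serial_bus_sizes[
				res->common_serial_bus.type];
	}
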
199/* 341/*
@@ -209,35 +351,49 @@ static const u8 acpi_gbl_resource_types[] = {
209 0, 351 0,
210 0, 352 0,
211 0, 353 0,
212 ACPI_SMALL_VARIABLE_LENGTH, 354 ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */
213 ACPI_FIXED_LENGTH, 355 ACPI_FIXED_LENGTH, /* 05 DMA */
214 ACPI_SMALL_VARIABLE_LENGTH, 356 ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
215 ACPI_FIXED_LENGTH, 357 ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
216 ACPI_FIXED_LENGTH, 358 ACPI_FIXED_LENGTH, /* 08 IO */
217 ACPI_FIXED_LENGTH, 359 ACPI_FIXED_LENGTH, /* 09 fixed_iO */
218 0, 360 ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */
219 0, 361 0,
220 0, 362 0,
221 0, 363 0,
222 ACPI_VARIABLE_LENGTH, 364 ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */
223 ACPI_FIXED_LENGTH, 365 ACPI_FIXED_LENGTH, /* 0_f end_tag */
224 366
225 /* Large descriptors */ 367 /* Large descriptors */
226 368
227 0, 369 0,
228 ACPI_FIXED_LENGTH, 370 ACPI_FIXED_LENGTH, /* 01 Memory24 */
229 ACPI_FIXED_LENGTH, 371 ACPI_FIXED_LENGTH, /* 02 generic_register */
230 0, 372 0,
231 ACPI_VARIABLE_LENGTH, 373 ACPI_VARIABLE_LENGTH, /* 04 vendor_long */
232 ACPI_FIXED_LENGTH, 374 ACPI_FIXED_LENGTH, /* 05 Memory32 */
233 ACPI_FIXED_LENGTH, 375 ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
234 ACPI_VARIABLE_LENGTH, 376 ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
235 ACPI_VARIABLE_LENGTH, 377 ACPI_VARIABLE_LENGTH, /* 08 Word* address */
236 ACPI_VARIABLE_LENGTH, 378 ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */
237 ACPI_VARIABLE_LENGTH, 379 ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */
238 ACPI_FIXED_LENGTH 380 ACPI_FIXED_LENGTH, /* 0_b Extended* address */
381 ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */
382 0,
383 ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */
239}; 384};
240 385
386/*
387 * For the i_aSL compiler/disassembler, we don't want any error messages
388 * because the disassembler uses the resource validation code to determine
389 * if Buffer objects are actually Resource Templates.
390 */
391#ifdef ACPI_ASL_COMPILER
392#define ACPI_RESOURCE_ERROR(plist)
393#else
394#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
395#endif
396
241/******************************************************************************* 397/*******************************************************************************
242 * 398 *
243 * FUNCTION: acpi_ut_walk_aml_resources 399 * FUNCTION: acpi_ut_walk_aml_resources
@@ -265,6 +421,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
265 u8 resource_index; 421 u8 resource_index;
266 u32 length; 422 u32 length;
267 u32 offset = 0; 423 u32 offset = 0;
424 u8 end_tag[2] = { 0x79, 0x00 };
268 425
269 ACPI_FUNCTION_TRACE(ut_walk_aml_resources); 426 ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
270 427
@@ -286,6 +443,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
286 443
287 status = acpi_ut_validate_resource(aml, &resource_index); 444 status = acpi_ut_validate_resource(aml, &resource_index);
288 if (ACPI_FAILURE(status)) { 445 if (ACPI_FAILURE(status)) {
446 /*
447 * Exit on failure. Cannot continue because the descriptor length
448 * may be bogus also.
449 */
289 return_ACPI_STATUS(status); 450 return_ACPI_STATUS(status);
290 } 451 }
291 452
@@ -300,7 +461,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
300 user_function(aml, length, offset, resource_index, 461 user_function(aml, length, offset, resource_index,
301 context); 462 context);
302 if (ACPI_FAILURE(status)) { 463 if (ACPI_FAILURE(status)) {
303 return (status); 464 return_ACPI_STATUS(status);
304 } 465 }
305 } 466 }
306 467
@@ -333,7 +494,19 @@ acpi_ut_walk_aml_resources(u8 * aml,
333 494
334 /* Did not find an end_tag descriptor */ 495 /* Did not find an end_tag descriptor */
335 496
336 return (AE_AML_NO_RESOURCE_END_TAG); 497 if (user_function) {
498
499 /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
500
501 (void)acpi_ut_validate_resource(end_tag, &resource_index);
502 status =
503 user_function(end_tag, 2, offset, resource_index, context);
504 if (ACPI_FAILURE(status)) {
505 return_ACPI_STATUS(status);
506 }
507 }
508
509 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
337} 510}
338 511
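For illustration, a minimal walk callback matching the calls above (the signature is assumed from the user_function invocations; all names are hypothetical). With this change the callback is also handed the synthesized two-byte end_tag (0x79 0x00) when a template omits one:

	static acpi_status
	count_descriptor(u8 *aml, u32 length, u32 offset,
			 u8 resource_index, void **context)
	{
		u32 *count = (u32 *)*context;	/* caller passes &count */

		(*count)++;	/* the synthesized end_tag counts too */
		return (AE_OK);
	}
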
339/******************************************************************************* 512/*******************************************************************************
@@ -354,6 +527,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
354 527
355acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) 528acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
356{ 529{
530 union aml_resource *aml_resource;
357 u8 resource_type; 531 u8 resource_type;
358 u8 resource_index; 532 u8 resource_index;
359 acpi_rs_length resource_length; 533 acpi_rs_length resource_length;
@@ -375,7 +549,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
375 /* Verify the large resource type (name) against the max */ 549 /* Verify the large resource type (name) against the max */
376 550
377 if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) { 551 if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
378 return (AE_AML_INVALID_RESOURCE_TYPE); 552 goto invalid_resource;
379 } 553 }
380 554
381 /* 555 /*
@@ -392,15 +566,17 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
392 ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3); 566 ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
393 } 567 }
394 568
395 /* Check validity of the resource type, zero indicates name is invalid */ 569 /*
396 570 * Check validity of the resource type, via acpi_gbl_resource_types. Zero
571 * indicates an invalid resource.
572 */
397 if (!acpi_gbl_resource_types[resource_index]) { 573 if (!acpi_gbl_resource_types[resource_index]) {
398 return (AE_AML_INVALID_RESOURCE_TYPE); 574 goto invalid_resource;
399 } 575 }
400 576
401 /* 577 /*
402 * 2) Validate the resource_length field. This ensures that the length 578 * Validate the resource_length field. This ensures that the length
403 * is at least reasonable, and guarantees that it is non-zero. 579 * is at least reasonable, and guarantees that it is non-zero.
404 */ 580 */
405 resource_length = acpi_ut_get_resource_length(aml); 581 resource_length = acpi_ut_get_resource_length(aml);
406 minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index]; 582 minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
@@ -413,7 +589,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
413 /* Fixed length resource, length must match exactly */ 589 /* Fixed length resource, length must match exactly */
414 590
415 if (resource_length != minimum_resource_length) { 591 if (resource_length != minimum_resource_length) {
416 return (AE_AML_BAD_RESOURCE_LENGTH); 592 goto bad_resource_length;
417 } 593 }
418 break; 594 break;
419 595
@@ -422,7 +598,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
422 /* Variable length resource, length must be at least the minimum */ 598 /* Variable length resource, length must be at least the minimum */
423 599
424 if (resource_length < minimum_resource_length) { 600 if (resource_length < minimum_resource_length) {
425 return (AE_AML_BAD_RESOURCE_LENGTH); 601 goto bad_resource_length;
426 } 602 }
427 break; 603 break;
428 604
@@ -432,7 +608,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
432 608
433 if ((resource_length > minimum_resource_length) || 609 if ((resource_length > minimum_resource_length) ||
434 (resource_length < (minimum_resource_length - 1))) { 610 (resource_length < (minimum_resource_length - 1))) {
435 return (AE_AML_BAD_RESOURCE_LENGTH); 611 goto bad_resource_length;
436 } 612 }
437 break; 613 break;
438 614
@@ -440,7 +616,23 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
440 616
441 /* Shouldn't happen (because of validation earlier), but be sure */ 617 /* Shouldn't happen (because of validation earlier), but be sure */
442 618
443 return (AE_AML_INVALID_RESOURCE_TYPE); 619 goto invalid_resource;
620 }
621
622 aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
623 if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
624
625 /* Validate the bus_type field */
626
627 if ((aml_resource->common_serial_bus.type == 0) ||
628 (aml_resource->common_serial_bus.type >
629 AML_RESOURCE_MAX_SERIALBUSTYPE)) {
630 ACPI_RESOURCE_ERROR((AE_INFO,
631 "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
632 aml_resource->common_serial_bus.
633 type));
634 return (AE_AML_INVALID_RESOURCE_TYPE);
635 }
444 } 636 }
445 637
446 /* Optionally return the resource table index */ 638 /* Optionally return the resource table index */
@@ -450,6 +642,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
450 } 642 }
451 643
452 return (AE_OK); 644 return (AE_OK);
645
646 invalid_resource:
647
648 ACPI_RESOURCE_ERROR((AE_INFO,
649 "Invalid/unsupported resource descriptor: Type 0x%2.2X",
650 resource_type));
651 return (AE_AML_INVALID_RESOURCE_TYPE);
652
653 bad_resource_length:
654
655 ACPI_RESOURCE_ERROR((AE_INFO,
656 "Invalid resource descriptor length: Type "
657 "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
658 resource_type, resource_length,
659 minimum_resource_length));
660 return (AE_AML_BAD_RESOURCE_LENGTH);
453} 661}
454 662
455/******************************************************************************* 663/*******************************************************************************
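A sketch of the table-index computation the validation above depends on, with constants that mirror the AML resource encoding rather than ACPICA definitions: bit 7 of the tag byte selects a large descriptor (name in the low 7 bits, offset past the 16 small-descriptor slots), while a small descriptor's name sits in bits 3-6. The range checks shown in the hunks above must run first:

	static u8 resource_index_from_tag(u8 tag)
	{
		if (tag & 0x80)			/* large descriptor */
			return 16 + (tag & 0x7F);
		return (tag & 0x78) >> 3;	/* small descriptor */
	}
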
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 30c21e1a936..4267477c279 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 420ebfe08c7..644e8c8ebc4 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
48#include "acnamesp.h" 48#include "acnamesp.h"
49#include "acdebug.h" 49#include "acdebug.h"
50#include "actables.h" 50#include "actables.h"
51#include "acinterp.h"
51 52
52#define _COMPONENT ACPI_UTILITIES 53#define _COMPONENT ACPI_UTILITIES
53ACPI_MODULE_NAME("utxface") 54ACPI_MODULE_NAME("utxface")
@@ -640,4 +641,41 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
640} 641}
641 642
642ACPI_EXPORT_SYMBOL(acpi_install_interface_handler) 643ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
644
645/*****************************************************************************
646 *
647 * FUNCTION: acpi_check_address_range
648 *
649 * PARAMETERS: space_id - Address space ID
650 * Address - Start address
651 * Length - Length
652 * Warn - TRUE if warning on overlap desired
653 *
654 * RETURN: Number of conflicts detected.
655 *
656 * DESCRIPTION: Check if the input address range overlaps any of the
657 * ASL operation region address ranges.
658 *
659 ****************************************************************************/
660u32
661acpi_check_address_range(acpi_adr_space_type space_id,
662 acpi_physical_address address,
663 acpi_size length, u8 warn)
664{
665 u32 overlaps;
666 acpi_status status;
667
668 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
669 if (ACPI_FAILURE(status)) {
670 return (0);
671 }
672
673 overlaps = acpi_ut_check_address_range(space_id, address,
674 (u32)length, warn);
675
676 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
677 return (overlaps);
678}
679
680ACPI_EXPORT_SYMBOL(acpi_check_address_range)
643#endif /* !ACPI_ASL_COMPILER */ 681#endif /* !ACPI_ASL_COMPILER */
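A hedged usage sketch for the new interface (the address and length below are made up): a driver can warn before mapping an MMIO window that AML also claims as an operation region.

	u32 conflicts;

	conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
					     0xFED40000, 0x1000, TRUE);
	if (conflicts)
		pr_warn("window overlaps %u ACPI operation region(s)\n",
			conflicts);
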
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 8d0245ec431..52b568af181 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
new file mode 100644
index 00000000000..1427d191d15
--- /dev/null
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -0,0 +1,187 @@
1/*******************************************************************************
2 *
3 * Module Name: utxfmutex - external AML mutex access functions
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acnamesp.h"
47
48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utxfmutex")
50
51/* Local prototypes */
52static acpi_status
53acpi_ut_get_mutex_object(acpi_handle handle,
54 acpi_string pathname,
55 union acpi_operand_object **ret_obj);
56
57/*******************************************************************************
58 *
59 * FUNCTION: acpi_ut_get_mutex_object
60 *
61 * PARAMETERS: Handle - Mutex or prefix handle (optional)
62 * Pathname - Mutex pathname (optional)
63 * ret_obj - Where the mutex object is returned
64 *
65 * RETURN: Status
66 *
67 * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
68 * Handle:Pathname. Either Handle or Pathname can be NULL, but
69 * not both.
70 *
71 ******************************************************************************/
72
73static acpi_status
74acpi_ut_get_mutex_object(acpi_handle handle,
75 acpi_string pathname,
76 union acpi_operand_object **ret_obj)
77{
78 struct acpi_namespace_node *mutex_node;
79 union acpi_operand_object *mutex_obj;
80 acpi_status status;
81
82 /* Parameter validation */
83
84 if (!ret_obj || (!handle && !pathname)) {
85 return (AE_BAD_PARAMETER);
86 }
87
88 /* Get the namespace node for the mutex */
89
90 mutex_node = handle;
91 if (pathname != NULL) {
92 status = acpi_get_handle(handle, pathname,
93 ACPI_CAST_PTR(acpi_handle,
94 &mutex_node));
95 if (ACPI_FAILURE(status)) {
96 return (status);
97 }
98 }
99
100 /* Ensure that we actually have a Mutex object */
101
102 if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
103 return (AE_TYPE);
104 }
105
106 /* Get the low-level mutex object */
107
108 mutex_obj = acpi_ns_get_attached_object(mutex_node);
109 if (!mutex_obj) {
110 return (AE_NULL_OBJECT);
111 }
112
113 *ret_obj = mutex_obj;
114 return (AE_OK);
115}
116
117/*******************************************************************************
118 *
119 * FUNCTION: acpi_acquire_mutex
120 *
121 * PARAMETERS: Handle - Mutex or prefix handle (optional)
122 * Pathname - Mutex pathname (optional)
123 * Timeout - Max time to wait for the lock (millisec)
124 *
125 * RETURN: Status
126 *
127 * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
128 * AML mutex objects, and allows for transaction locking between
129 * drivers and AML code. The mutex node is pointed to by
130 * Handle:Pathname. Either Handle or Pathname can be NULL, but
131 * not both.
132 *
133 ******************************************************************************/
134
135acpi_status
136acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
137{
138 acpi_status status;
139 union acpi_operand_object *mutex_obj;
140
141 /* Get the low-level mutex associated with Handle:Pathname */
142
143 status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
144 if (ACPI_FAILURE(status)) {
145 return (status);
146 }
147
148 /* Acquire the OS mutex */
149
150 status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
151 return (status);
152}
153
154/*******************************************************************************
155 *
156 * FUNCTION: acpi_release_mutex
157 *
158 * PARAMETERS: Handle - Mutex or prefix handle (optional)
159 * Pathname - Mutex pathname (optional)
160 *
161 * RETURN: Status
162 *
163 * DESCRIPTION: Release an AML mutex. This is a device driver interface to
164 * AML mutex objects, and allows for transaction locking between
165 * drivers and AML code. The mutex node is pointed to by
166 * Handle:Pathname. Either Handle or Pathname can be NULL, but
167 * not both.
168 *
169 ******************************************************************************/
170
171acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
172{
173 acpi_status status;
174 union acpi_operand_object *mutex_obj;
175
176 /* Get the low-level mutex associated with Handle:Pathname */
177
178 status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
179 if (ACPI_FAILURE(status)) {
180 return (status);
181 }
182
183 /* Release the OS mutex */
184
185 acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
186 return (AE_OK);
187}
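A driver-side usage sketch for the two new interfaces; the mutex pathname is hypothetical, and ACPI_WAIT_FOREVER (0xFFFF) blocks until the AML mutex is free:

	acpi_status status;

	status = acpi_acquire_mutex(NULL, "\\_SB.PCI0.MUT0",
				    ACPI_WAIT_FOREVER);
	if (ACPI_SUCCESS(status)) {
		/* ... access hardware shared with AML code ... */
		acpi_release_mutex(NULL, "\\_SB.PCI0.MUT0");
	}
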
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 61540360d5c..e45350cb6ac 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -34,13 +34,13 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/acpi.h> 36#include <linux/acpi.h>
37#include <linux/acpi_io.h>
37#include <linux/slab.h> 38#include <linux/slab.h>
38#include <linux/io.h> 39#include <linux/io.h>
39#include <linux/kref.h> 40#include <linux/kref.h>
40#include <linux/rculist.h> 41#include <linux/rculist.h>
41#include <linux/interrupt.h> 42#include <linux/interrupt.h>
42#include <linux/debugfs.h> 43#include <linux/debugfs.h>
43#include <acpi/atomicio.h>
44 44
45#include "apei-internal.h" 45#include "apei-internal.h"
46 46
@@ -70,7 +70,7 @@ int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
70{ 70{
71 int rc; 71 int rc;
72 72
73 rc = acpi_atomic_read(val, &entry->register_region); 73 rc = apei_read(val, &entry->register_region);
74 if (rc) 74 if (rc)
75 return rc; 75 return rc;
76 *val >>= entry->register_region.bit_offset; 76 *val >>= entry->register_region.bit_offset;
@@ -116,13 +116,13 @@ int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
116 val <<= entry->register_region.bit_offset; 116 val <<= entry->register_region.bit_offset;
117 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { 117 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
118 u64 valr = 0; 118 u64 valr = 0;
119 rc = acpi_atomic_read(&valr, &entry->register_region); 119 rc = apei_read(&valr, &entry->register_region);
120 if (rc) 120 if (rc)
121 return rc; 121 return rc;
122 valr &= ~(entry->mask << entry->register_region.bit_offset); 122 valr &= ~(entry->mask << entry->register_region.bit_offset);
123 val |= valr; 123 val |= valr;
124 } 124 }
125 rc = acpi_atomic_write(val, &entry->register_region); 125 rc = apei_write(val, &entry->register_region);
126 126
127 return rc; 127 return rc;
128} 128}
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
243 u8 ins = entry->instruction; 243 u8 ins = entry->instruction;
244 244
245 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) 245 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
246 return acpi_pre_map_gar(&entry->register_region); 246 return acpi_os_map_generic_address(&entry->register_region);
247 247
248 return 0; 248 return 0;
249} 249}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
276 u8 ins = entry->instruction; 276 u8 ins = entry->instruction;
277 277
278 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) 278 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
279 acpi_post_unmap_gar(&entry->register_region); 279 acpi_os_unmap_generic_address(&entry->register_region);
280 280
281 return 0; 281 return 0;
282} 282}
@@ -421,6 +421,17 @@ static int apei_resources_merge(struct apei_resources *resources1,
421 return 0; 421 return 0;
422} 422}
423 423
424int apei_resources_add(struct apei_resources *resources,
425 unsigned long start, unsigned long size,
426 bool iomem)
427{
428 if (iomem)
429 return apei_res_add(&resources->iomem, start, size);
430 else
431 return apei_res_add(&resources->ioport, start, size);
432}
433EXPORT_SYMBOL_GPL(apei_resources_add);
434
424/* 435/*
425 * EINJ has two groups of GARs (EINJ table entry and trigger table 436 * EINJ has two groups of GARs (EINJ table entry and trigger table
426 * entry), so common resources are subtracted from the trigger table 437 * entry), so common resources are subtracted from the trigger table
@@ -438,8 +449,19 @@ int apei_resources_sub(struct apei_resources *resources1,
438} 449}
439EXPORT_SYMBOL_GPL(apei_resources_sub); 450EXPORT_SYMBOL_GPL(apei_resources_sub);
440 451
452static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
453{
454 struct apei_resources *resources = data;
455 return apei_res_add(&resources->iomem, start, size);
456}
457
458static int apei_get_nvs_resources(struct apei_resources *resources)
459{
460 return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
461}
462
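The callback contract assumed here: acpi_nvs_for_each_region() hands each NVS region's start/size to the callback and stops on a nonzero return, which is why apei_res_add()'s status is propagated directly. Another hypothetical consumer, for illustration:

	static int count_nvs_regions(__u64 start, __u64 size, void *data)
	{
		unsigned int *count = data;

		(*count)++;	/* tally regions, ignore the range */
		return 0;	/* zero keeps the iteration going */
	}
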
441/* 463/*
442 * IO memory/port rersource management mechanism is used to check 464 * IO memory/port resource management mechanism is used to check
443 * whether memory/port area used by GARs conflicts with normal memory 465 * whether memory/port area used by GARs conflicts with normal memory
444 * or IO memory/port of devices. 466 * or IO memory/port of devices.
445 */ 467 */
@@ -448,21 +470,35 @@ int apei_resources_request(struct apei_resources *resources,
448{ 470{
449 struct apei_res *res, *res_bak = NULL; 471 struct apei_res *res, *res_bak = NULL;
450 struct resource *r; 472 struct resource *r;
473 struct apei_resources nvs_resources;
451 int rc; 474 int rc;
452 475
453 rc = apei_resources_sub(resources, &apei_resources_all); 476 rc = apei_resources_sub(resources, &apei_resources_all);
454 if (rc) 477 if (rc)
455 return rc; 478 return rc;
456 479
480 /*
481 * Some firmware uses the ACPI NVS region, which has been marked
482 * as busy, so exclude it from the APEI resources to avoid a
483 * false conflict.
484 */
485 apei_resources_init(&nvs_resources);
486 rc = apei_get_nvs_resources(&nvs_resources);
487 if (rc)
488 goto res_fini;
489 rc = apei_resources_sub(resources, &nvs_resources);
490 if (rc)
491 goto res_fini;
492
457 rc = -EINVAL; 493 rc = -EINVAL;
458 list_for_each_entry(res, &resources->iomem, list) { 494 list_for_each_entry(res, &resources->iomem, list) {
459 r = request_mem_region(res->start, res->end - res->start, 495 r = request_mem_region(res->start, res->end - res->start,
460 desc); 496 desc);
461 if (!r) { 497 if (!r) {
462 pr_err(APEI_PFX 498 pr_err(APEI_PFX
463 "Can not request iomem region <%016llx-%016llx> for GARs.\n", 499 "Can not request [mem %#010llx-%#010llx] for %s registers\n",
464 (unsigned long long)res->start, 500 (unsigned long long)res->start,
465 (unsigned long long)res->end); 501 (unsigned long long)res->end - 1, desc);
466 res_bak = res; 502 res_bak = res;
467 goto err_unmap_iomem; 503 goto err_unmap_iomem;
468 } 504 }
@@ -472,9 +508,9 @@ int apei_resources_request(struct apei_resources *resources,
472 r = request_region(res->start, res->end - res->start, desc); 508 r = request_region(res->start, res->end - res->start, desc);
473 if (!r) { 509 if (!r) {
474 pr_err(APEI_PFX 510 pr_err(APEI_PFX
475 "Can not request ioport region <%016llx-%016llx> for GARs.\n", 511 "Can not request [io %#06llx-%#06llx] for %s registers\n",
476 (unsigned long long)res->start, 512 (unsigned long long)res->start,
477 (unsigned long long)res->end); 513 (unsigned long long)res->end - 1, desc);
478 res_bak = res; 514 res_bak = res;
479 goto err_unmap_ioport; 515 goto err_unmap_ioport;
480 } 516 }
@@ -500,6 +536,8 @@ err_unmap_iomem:
500 break; 536 break;
501 release_mem_region(res->start, res->end - res->start); 537 release_mem_region(res->start, res->end - res->start);
502 } 538 }
539res_fini:
540 apei_resources_fini(&nvs_resources);
503 return rc; 541 return rc;
504} 542}
505EXPORT_SYMBOL_GPL(apei_resources_request); 543EXPORT_SYMBOL_GPL(apei_resources_request);
@@ -553,6 +591,96 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
553 return 0; 591 return 0;
554} 592}
555 593
594/* read GAR in interrupt (including NMI) or process context */
595int apei_read(u64 *val, struct acpi_generic_address *reg)
596{
597 int rc;
598 u64 address;
599 u32 tmp, width = reg->bit_width;
600 acpi_status status;
601
602 rc = apei_check_gar(reg, &address);
603 if (rc)
604 return rc;
605
606 if (width == 64)
607 width = 32; /* Break into two 32-bit transfers */
608
609 *val = 0;
610 switch(reg->space_id) {
611 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
612 status = acpi_os_read_memory((acpi_physical_address)
613 address, &tmp, width);
614 if (ACPI_FAILURE(status))
615 return -EIO;
616 *val = tmp;
617
618 if (reg->bit_width == 64) {
619 /* Read the top 32 bits */
620 status = acpi_os_read_memory((acpi_physical_address)
621 (address + 4), &tmp, 32);
622 if (ACPI_FAILURE(status))
623 return -EIO;
624 *val |= ((u64)tmp << 32);
625 }
626 break;
627 case ACPI_ADR_SPACE_SYSTEM_IO:
628 status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
629 if (ACPI_FAILURE(status))
630 return -EIO;
631 break;
632 default:
633 return -EINVAL;
634 }
635
636 return 0;
637}
638EXPORT_SYMBOL_GPL(apei_read);
639
640/* write GAR in interrupt (including NMI) or process context */
641int apei_write(u64 val, struct acpi_generic_address *reg)
642{
643 int rc;
644 u64 address;
645 u32 width = reg->bit_width;
646 acpi_status status;
647
648 rc = apei_check_gar(reg, &address);
649 if (rc)
650 return rc;
651
652 if (width == 64)
653 width = 32; /* Break into two 32-bit transfers */
654
655 switch (reg->space_id) {
656 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
657 status = acpi_os_write_memory((acpi_physical_address)
658 address, ACPI_LODWORD(val),
659 width);
660 if (ACPI_FAILURE(status))
661 return -EIO;
662
663 if (reg->bit_width == 64) {
664 status = acpi_os_write_memory((acpi_physical_address)
665 (address + 4),
666 ACPI_HIDWORD(val), 32);
667 if (ACPI_FAILURE(status))
668 return -EIO;
669 }
670 break;
671 case ACPI_ADR_SPACE_SYSTEM_IO:
672 status = acpi_os_write_port(address, val, reg->bit_width);
673 if (ACPI_FAILURE(status))
674 return -EIO;
675 break;
676 default:
677 return -EINVAL;
678 }
679
680 return 0;
681}
682EXPORT_SYMBOL_GPL(apei_write);
683
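The 64-bit accesses above are split because the low-level OSL accessors are assumed to handle at most 32-bit widths. A self-contained sketch of the same pattern, built on a hypothetical 32-bit accessor phys_read32():

	static int read64_as_two_32(u64 *val, u64 address)
	{
		u32 lo, hi;

		if (phys_read32(address, &lo) ||
		    phys_read32(address + 4, &hi))
			return -EIO;
		*val = ((u64)hi << 32) | lo;	/* low dword first */
		return 0;
	}
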
556static int collect_res_callback(struct apei_exec_context *ctx, 684static int collect_res_callback(struct apei_exec_context *ctx,
557 struct acpi_whea_header *entry, 685 struct acpi_whea_header *entry,
558 void *data) 686 void *data)
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index f57050e7a5e..cca240a3303 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -68,6 +68,9 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
68/* IP has been set in instruction function */ 68/* IP has been set in instruction function */
69#define APEI_EXEC_SET_IP 1 69#define APEI_EXEC_SET_IP 1
70 70
71int apei_read(u64 *val, struct acpi_generic_address *reg);
72int apei_write(u64 val, struct acpi_generic_address *reg);
73
71int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val); 74int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
72int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val); 75int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
73int apei_exec_read_register(struct apei_exec_context *ctx, 76int apei_exec_read_register(struct apei_exec_context *ctx,
@@ -95,6 +98,9 @@ static inline void apei_resources_init(struct apei_resources *resources)
95} 98}
96 99
97void apei_resources_fini(struct apei_resources *resources); 100void apei_resources_fini(struct apei_resources *resources);
101int apei_resources_add(struct apei_resources *resources,
102 unsigned long start, unsigned long size,
103 bool iomem);
98int apei_resources_sub(struct apei_resources *resources1, 104int apei_resources_sub(struct apei_resources *resources1,
99 struct apei_resources *resources2); 105 struct apei_resources *resources2);
100int apei_resources_request(struct apei_resources *resources, 106int apei_resources_request(struct apei_resources *resources,
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 589b96c3870..5b898d4dda9 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -43,6 +43,42 @@
43#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) 43#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
44 44
45/* 45/*
46 * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
47 */
48static int acpi5;
49
50struct set_error_type_with_address {
51 u32 type;
52 u32 vendor_extension;
53 u32 flags;
54 u32 apicid;
55 u64 memory_address;
56 u64 memory_address_range;
57 u32 pcie_sbdf;
58};
59enum {
60 SETWA_FLAGS_APICID = 1,
61 SETWA_FLAGS_MEM = 2,
62 SETWA_FLAGS_PCIE_SBDF = 4,
63};
64
65/*
66 * Vendor extensions for platform specific operations
67 */
68struct vendor_error_type_extension {
69 u32 length;
70 u32 pcie_sbdf;
71 u16 vendor_id;
72 u16 device_id;
73 u8 rev_id;
74 u8 reserved[3];
75};
76
77static u32 vendor_flags;
78static struct debugfs_blob_wrapper vendor_blob;
79static char vendor_dev[64];
80
81/*
46 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the 82 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
47 * EINJ table through an unpublished extension. Use with caution as 83 * EINJ table through an unpublished extension. Use with caution as
48 * most will ignore the parameter and make their own choice of address 84 * most will ignore the parameter and make their own choice of address
@@ -103,7 +139,14 @@ static struct apei_exec_ins_type einj_ins_type[] = {
103 */ 139 */
104static DEFINE_MUTEX(einj_mutex); 140static DEFINE_MUTEX(einj_mutex);
105 141
106static struct einj_parameter *einj_param; 142static void *einj_param;
143
144#ifndef readq
145static inline __u64 readq(volatile void __iomem *addr)
146{
147 return ((__u64)readl(addr+4) << 32) + readl(addr);
148}
149#endif
107 150
108#ifndef writeq 151#ifndef writeq
109static inline void writeq(__u64 val, volatile void __iomem *addr) 152static inline void writeq(__u64 val, volatile void __iomem *addr)
@@ -158,10 +201,31 @@ static int einj_timedout(u64 *t)
158 return 0; 201 return 0;
159} 202}
160 203
161static u64 einj_get_parameter_address(void) 204static void check_vendor_extension(u64 paddr,
205 struct set_error_type_with_address *v5param)
206{
207 int offset = readl(&v5param->vendor_extension);
208 struct vendor_error_type_extension *v;
209 u32 sbdf;
210
211 if (!offset)
212 return;
213 v = ioremap(paddr + offset, sizeof(*v));
214 if (!v)
215 return;
216 sbdf = readl(&v->pcie_sbdf);
217 sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
218 sbdf >> 24, (sbdf >> 16) & 0xff,
219 (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
220 readw(&v->vendor_id), readw(&v->device_id),
221 readb(&v->rev_id));
222 iounmap(v);
223}
224
225static void *einj_get_parameter_address(void)
162{ 226{
163 int i; 227 int i;
164 u64 paddr = 0; 228 u64 paddrv4 = 0, paddrv5 = 0;
165 struct acpi_whea_header *entry; 229 struct acpi_whea_header *entry;
166 230
167 entry = EINJ_TAB_ENTRY(einj_tab); 231 entry = EINJ_TAB_ENTRY(einj_tab);
@@ -170,12 +234,40 @@ static u64 einj_get_parameter_address(void)
170 entry->instruction == ACPI_EINJ_WRITE_REGISTER && 234 entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
171 entry->register_region.space_id == 235 entry->register_region.space_id ==
172 ACPI_ADR_SPACE_SYSTEM_MEMORY) 236 ACPI_ADR_SPACE_SYSTEM_MEMORY)
173 memcpy(&paddr, &entry->register_region.address, 237 memcpy(&paddrv4, &entry->register_region.address,
174 sizeof(paddr)); 238 sizeof(paddrv4));
239 if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
240 entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
241 entry->register_region.space_id ==
242 ACPI_ADR_SPACE_SYSTEM_MEMORY)
243 memcpy(&paddrv5, &entry->register_region.address,
244 sizeof(paddrv5));
175 entry++; 245 entry++;
176 } 246 }
247 if (paddrv5) {
248 struct set_error_type_with_address *v5param;
249
250 v5param = ioremap(paddrv5, sizeof(*v5param));
251 if (v5param) {
252 acpi5 = 1;
253 check_vendor_extension(paddrv5, v5param);
254 return v5param;
255 }
256 }
257 if (paddrv4) {
258 struct einj_parameter *v4param;
259
260 v4param = ioremap(paddrv4, sizeof(*v4param));
261 if (!v4param)
262 return 0;
263 if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) {
264 iounmap(v4param);
265 return 0;
266 }
267 return v4param;
268 }
177 269
178 return paddr; 270 return 0;
179} 271}
180 272
181/* do sanity check to trigger table */ 273/* do sanity check to trigger table */
@@ -194,8 +286,29 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
194 return 0; 286 return 0;
195} 287}
196 288
289static struct acpi_generic_address *einj_get_trigger_parameter_region(
290 struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
291{
292 int i;
293 struct acpi_whea_header *entry;
294
295 entry = (struct acpi_whea_header *)
296 ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
297 for (i = 0; i < trigger_tab->entry_count; i++) {
298 if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
299 entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
300 entry->register_region.space_id ==
301 ACPI_ADR_SPACE_SYSTEM_MEMORY &&
302 (entry->register_region.address & param2) == (param1 & param2))
303 return &entry->register_region;
304 entry++;
305 }
306
307 return NULL;
308}
197/* Execute instructions in trigger error action table */ 309/* Execute instructions in trigger error action table */
198static int __einj_error_trigger(u64 trigger_paddr) 310static int __einj_error_trigger(u64 trigger_paddr, u32 type,
311 u64 param1, u64 param2)
199{ 312{
200 struct acpi_einj_trigger *trigger_tab = NULL; 313 struct acpi_einj_trigger *trigger_tab = NULL;
201 struct apei_exec_context trigger_ctx; 314 struct apei_exec_context trigger_ctx;
@@ -204,14 +317,16 @@ static int __einj_error_trigger(u64 trigger_paddr)
204 struct resource *r; 317 struct resource *r;
205 u32 table_size; 318 u32 table_size;
206 int rc = -EIO; 319 int rc = -EIO;
320 struct acpi_generic_address *trigger_param_region = NULL;
207 321
208 r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), 322 r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
209 "APEI EINJ Trigger Table"); 323 "APEI EINJ Trigger Table");
210 if (!r) { 324 if (!r) {
211 pr_err(EINJ_PFX 325 pr_err(EINJ_PFX
212 "Can not request iomem region <%016llx-%016llx> for Trigger table.\n", 326 "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
213 (unsigned long long)trigger_paddr, 327 (unsigned long long)trigger_paddr,
214 (unsigned long long)trigger_paddr+sizeof(*trigger_tab)); 328 (unsigned long long)trigger_paddr +
329 sizeof(*trigger_tab) - 1);
215 goto out; 330 goto out;
216 } 331 }
217 trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); 332 trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -232,9 +347,9 @@ static int __einj_error_trigger(u64 trigger_paddr)
232 "APEI EINJ Trigger Table"); 347 "APEI EINJ Trigger Table");
233 if (!r) { 348 if (!r) {
234 pr_err(EINJ_PFX 349 pr_err(EINJ_PFX
235"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n", 350"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
236 (unsigned long long)trigger_paddr+sizeof(*trigger_tab), 351 (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
237 (unsigned long long)trigger_paddr + table_size); 352 (unsigned long long)trigger_paddr + table_size - 1);
238 goto out_rel_header; 353 goto out_rel_header;
239 } 354 }
240 iounmap(trigger_tab); 355 iounmap(trigger_tab);
@@ -255,6 +370,30 @@ static int __einj_error_trigger(u64 trigger_paddr)
255 rc = apei_resources_sub(&trigger_resources, &einj_resources); 370 rc = apei_resources_sub(&trigger_resources, &einj_resources);
256 if (rc) 371 if (rc)
257 goto out_fini; 372 goto out_fini;
373 /*
374 * Some firmware will access the target address specified in
375 * param1 to trigger the error when injecting a memory error.
376 * This causes a resource conflict with regular memory, so
377 * remove it from the trigger table resources.
378 */
379 if (param_extension && (type & 0x0038) && param2) {
380 struct apei_resources addr_resources;
381 apei_resources_init(&addr_resources);
382 trigger_param_region = einj_get_trigger_parameter_region(
383 trigger_tab, param1, param2);
384 if (trigger_param_region) {
385 rc = apei_resources_add(&addr_resources,
386 trigger_param_region->address,
387 trigger_param_region->bit_width/8, true);
388 if (rc)
389 goto out_fini;
390 rc = apei_resources_sub(&trigger_resources,
391 &addr_resources);
392 }
393 apei_resources_fini(&addr_resources);
394 if (rc)
395 goto out_fini;
396 }
258 rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); 397 rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
259 if (rc) 398 if (rc)
260 goto out_fini; 399 goto out_fini;
@@ -293,12 +432,56 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
293 if (rc) 432 if (rc)
294 return rc; 433 return rc;
295 apei_exec_ctx_set_input(&ctx, type); 434 apei_exec_ctx_set_input(&ctx, type);
296 rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); 435 if (acpi5) {
297 if (rc) 436 struct set_error_type_with_address *v5param = einj_param;
298 return rc; 437
299 if (einj_param) { 438 writel(type, &v5param->type);
300 writeq(param1, &einj_param->param1); 439 if (type & 0x80000000) {
301 writeq(param2, &einj_param->param2); 440 switch (vendor_flags) {
441 case SETWA_FLAGS_APICID:
442 writel(param1, &v5param->apicid);
443 break;
444 case SETWA_FLAGS_MEM:
445 writeq(param1, &v5param->memory_address);
446 writeq(param2, &v5param->memory_address_range);
447 break;
448 case SETWA_FLAGS_PCIE_SBDF:
449 writel(param1, &v5param->pcie_sbdf);
450 break;
451 }
452 writel(vendor_flags, &v5param->flags);
453 } else {
454 switch (type) {
455 case ACPI_EINJ_PROCESSOR_CORRECTABLE:
456 case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
457 case ACPI_EINJ_PROCESSOR_FATAL:
458 writel(param1, &v5param->apicid);
459 writel(SETWA_FLAGS_APICID, &v5param->flags);
460 break;
461 case ACPI_EINJ_MEMORY_CORRECTABLE:
462 case ACPI_EINJ_MEMORY_UNCORRECTABLE:
463 case ACPI_EINJ_MEMORY_FATAL:
464 writeq(param1, &v5param->memory_address);
465 writeq(param2, &v5param->memory_address_range);
466 writel(SETWA_FLAGS_MEM, &v5param->flags);
467 break;
468 case ACPI_EINJ_PCIX_CORRECTABLE:
469 case ACPI_EINJ_PCIX_UNCORRECTABLE:
470 case ACPI_EINJ_PCIX_FATAL:
471 writel(param1, &v5param->pcie_sbdf);
472 writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags);
473 break;
474 }
475 }
476 } else {
477 rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
478 if (rc)
479 return rc;
480 if (einj_param) {
481 struct einj_parameter *v4param = einj_param;
482 writeq(param1, &v4param->param1);
483 writeq(param2, &v4param->param2);
484 }
302 } 485 }
303 rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); 486 rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
304 if (rc) 487 if (rc)
@@ -324,7 +507,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
324 if (rc) 507 if (rc)
325 return rc; 508 return rc;
326 trigger_paddr = apei_exec_ctx_get_output(&ctx); 509 trigger_paddr = apei_exec_ctx_get_output(&ctx);
327 rc = __einj_error_trigger(trigger_paddr); 510 rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
328 if (rc) 511 if (rc)
329 return rc; 512 return rc;
330 rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); 513 rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -408,15 +591,25 @@ static int error_type_set(void *data, u64 val)
408{ 591{
409 int rc; 592 int rc;
410 u32 available_error_type = 0; 593 u32 available_error_type = 0;
594 u32 tval, vendor;
595
596 /*
597 * Vendor-defined types have the 0x80000000 bit set and
598 * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
599 */
600 vendor = val & 0x80000000;
601 tval = val & 0x7fffffff;
411 602
412 /* Only one error type can be specified */ 603 /* Only one error type can be specified */
413 if (val & (val - 1)) 604 if (tval & (tval - 1))
414 return -EINVAL;
415 rc = einj_get_available_error_type(&available_error_type);
416 if (rc)
417 return rc;
418 if (!(val & available_error_type))
419 return -EINVAL; 605 return -EINVAL;
606 if (!vendor) {
607 rc = einj_get_available_error_type(&available_error_type);
608 if (rc)
609 return rc;
610 if (!(val & available_error_type))
611 return -EINVAL;
612 }
420 error_type = val; 613 error_type = val;
421 614
422 return 0; 615 return 0;
@@ -455,7 +648,6 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
455static int __init einj_init(void) 648static int __init einj_init(void)
456{ 649{
457 int rc; 650 int rc;
458 u64 param_paddr;
459 acpi_status status; 651 acpi_status status;
460 struct dentry *fentry; 652 struct dentry *fentry;
461 struct apei_exec_context ctx; 653 struct apei_exec_context ctx;
@@ -465,10 +657,9 @@ static int __init einj_init(void)
465 657
466 status = acpi_get_table(ACPI_SIG_EINJ, 0, 658 status = acpi_get_table(ACPI_SIG_EINJ, 0,
467 (struct acpi_table_header **)&einj_tab); 659 (struct acpi_table_header **)&einj_tab);
468 if (status == AE_NOT_FOUND) { 660 if (status == AE_NOT_FOUND)
469 pr_info(EINJ_PFX "Table is not found!\n");
470 return -ENODEV; 661 return -ENODEV;
471 } else if (ACPI_FAILURE(status)) { 662 else if (ACPI_FAILURE(status)) {
472 const char *msg = acpi_format_exception(status); 663 const char *msg = acpi_format_exception(status);
473 pr_err(EINJ_PFX "Failed to get table, %s\n", msg); 664 pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
474 return -EINVAL; 665 return -EINVAL;
@@ -509,23 +700,30 @@ static int __init einj_init(void)
509 rc = apei_exec_pre_map_gars(&ctx); 700 rc = apei_exec_pre_map_gars(&ctx);
510 if (rc) 701 if (rc)
511 goto err_release; 702 goto err_release;
512 if (param_extension) { 703
513 param_paddr = einj_get_parameter_address(); 704 einj_param = einj_get_parameter_address();
514 if (param_paddr) { 705 if ((param_extension || acpi5) && einj_param) {
515 einj_param = ioremap(param_paddr, sizeof(*einj_param)); 706 fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
516 rc = -ENOMEM; 707 einj_debug_dir, &error_param1);
517 if (!einj_param) 708 if (!fentry)
518 goto err_unmap; 709 goto err_unmap;
519 fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, 710 fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
520 einj_debug_dir, &error_param1); 711 einj_debug_dir, &error_param2);
521 if (!fentry) 712 if (!fentry)
522 goto err_unmap; 713 goto err_unmap;
523 fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, 714 }
524 einj_debug_dir, &error_param2); 715
525 if (!fentry) 716 if (vendor_dev[0]) {
526 goto err_unmap; 717 vendor_blob.data = vendor_dev;
527 } else 718 vendor_blob.size = strlen(vendor_dev);
528 pr_warn(EINJ_PFX "Parameter extension is not supported.\n"); 719 fentry = debugfs_create_blob("vendor", S_IRUSR,
720 einj_debug_dir, &vendor_blob);
721 if (!fentry)
722 goto err_unmap;
723 fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
724 einj_debug_dir, &vendor_flags);
725 if (!fentry)
726 goto err_unmap;
529 } 727 }
530 728
531 pr_info(EINJ_PFX "Error INJection is initialized.\n"); 729 pr_info(EINJ_PFX "Error INJection is initialized.\n");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6a9e3bad13f..eb9fab5b96e 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1127,10 +1127,9 @@ static int __init erst_init(void)
1127 1127
1128 status = acpi_get_table(ACPI_SIG_ERST, 0, 1128 status = acpi_get_table(ACPI_SIG_ERST, 0,
1129 (struct acpi_table_header **)&erst_tab); 1129 (struct acpi_table_header **)&erst_tab);
1130 if (status == AE_NOT_FOUND) { 1130 if (status == AE_NOT_FOUND)
1131 pr_info(ERST_PFX "Table is not found!\n");
1132 goto err; 1131 goto err;
1133 } else if (ACPI_FAILURE(status)) { 1132 else if (ACPI_FAILURE(status)) {
1134 const char *msg = acpi_format_exception(status); 1133 const char *msg = acpi_format_exception(status);
1135 pr_err(ERST_PFX "Failed to get table, %s\n", msg); 1134 pr_err(ERST_PFX "Failed to get table, %s\n", msg);
1136 rc = -EINVAL; 1135 rc = -EINVAL;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index ebaf037a787..9b3cac0abec 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/acpi.h> 35#include <linux/acpi.h>
36#include <linux/acpi_io.h>
36#include <linux/io.h> 37#include <linux/io.h>
37#include <linux/interrupt.h> 38#include <linux/interrupt.h>
38#include <linux/timer.h> 39#include <linux/timer.h>
@@ -45,8 +46,9 @@
45#include <linux/irq_work.h> 46#include <linux/irq_work.h>
46#include <linux/llist.h> 47#include <linux/llist.h>
47#include <linux/genalloc.h> 48#include <linux/genalloc.h>
49#include <linux/pci.h>
50#include <linux/aer.h>
48#include <acpi/apei.h> 51#include <acpi/apei.h>
49#include <acpi/atomicio.h>
50#include <acpi/hed.h> 52#include <acpi/hed.h>
51#include <asm/mce.h> 53#include <asm/mce.h>
52#include <asm/tlbflush.h> 54#include <asm/tlbflush.h>
@@ -299,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
299 if (!ghes) 301 if (!ghes)
300 return ERR_PTR(-ENOMEM); 302 return ERR_PTR(-ENOMEM);
301 ghes->generic = generic; 303 ghes->generic = generic;
302 rc = acpi_pre_map_gar(&generic->error_status_address); 304 rc = acpi_os_map_generic_address(&generic->error_status_address);
303 if (rc) 305 if (rc)
304 goto err_free; 306 goto err_free;
305 error_block_length = generic->error_block_length; 307 error_block_length = generic->error_block_length;
@@ -319,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
319 return ghes; 321 return ghes;
320 322
321err_unmap: 323err_unmap:
322 acpi_post_unmap_gar(&generic->error_status_address); 324 acpi_os_unmap_generic_address(&generic->error_status_address);
323err_free: 325err_free:
324 kfree(ghes); 326 kfree(ghes);
325 return ERR_PTR(rc); 327 return ERR_PTR(rc);
@@ -328,7 +330,7 @@ err_free:
328static void ghes_fini(struct ghes *ghes) 330static void ghes_fini(struct ghes *ghes)
329{ 331{
330 kfree(ghes->estatus); 332 kfree(ghes->estatus);
331 acpi_post_unmap_gar(&ghes->generic->error_status_address); 333 acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
332} 334}
333 335
334enum { 336enum {
@@ -399,7 +401,7 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
399 u32 len; 401 u32 len;
400 int rc; 402 int rc;
401 403
402 rc = acpi_atomic_read(&buf_paddr, &g->error_status_address); 404 rc = apei_read(&buf_paddr, &g->error_status_address);
403 if (rc) { 405 if (rc) {
404 if (!silent && printk_ratelimit()) 406 if (!silent && printk_ratelimit())
405 pr_warning(FW_WARN GHES_PFX 407 pr_warning(FW_WARN GHES_PFX
@@ -476,6 +478,27 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
476 } 478 }
477#endif 479#endif
478 } 480 }
481#ifdef CONFIG_ACPI_APEI_PCIEAER
482 else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
483 CPER_SEC_PCIE)) {
484 struct cper_sec_pcie *pcie_err;
485 pcie_err = (struct cper_sec_pcie *)(gdata+1);
486 if (sev == GHES_SEV_RECOVERABLE &&
487 sec_sev == GHES_SEV_RECOVERABLE &&
488 pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
489 pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
490 unsigned int devfn;
491 int aer_severity;
492 devfn = PCI_DEVFN(pcie_err->device_id.device,
493 pcie_err->device_id.function);
494 aer_severity = cper_severity_to_aer(sev);
495 aer_recover_queue(pcie_err->device_id.segment,
496 pcie_err->device_id.bus,
497 devfn, aer_severity);
498 }
499
500 }
501#endif
479 } 502 }
480} 503}
481 504
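For reference, PCI_DEVFN() packs slot and function in the config-space encoding, e.g.:

	unsigned int devfn = PCI_DEVFN(0x1f, 0x3);	/* (0x1f << 3) | 0x3 */
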
@@ -483,16 +506,22 @@ static void __ghes_print_estatus(const char *pfx,
483 const struct acpi_hest_generic *generic, 506 const struct acpi_hest_generic *generic,
484 const struct acpi_hest_generic_status *estatus) 507 const struct acpi_hest_generic_status *estatus)
485{ 508{
509 static atomic_t seqno;
510 unsigned int curr_seqno;
511 char pfx_seq[64];
512
486 if (pfx == NULL) { 513 if (pfx == NULL) {
487 if (ghes_severity(estatus->error_severity) <= 514 if (ghes_severity(estatus->error_severity) <=
488 GHES_SEV_CORRECTED) 515 GHES_SEV_CORRECTED)
489 pfx = KERN_WARNING HW_ERR; 516 pfx = KERN_WARNING;
490 else 517 else
491 pfx = KERN_ERR HW_ERR; 518 pfx = KERN_ERR;
492 } 519 }
520 curr_seqno = atomic_inc_return(&seqno);
521 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
493 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n", 522 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
494 pfx, generic->header.source_id); 523 pfx_seq, generic->header.source_id);
495 apei_estatus_print(pfx, estatus); 524 apei_estatus_print(pfx_seq, estatus);
496} 525}
497 526
498static int ghes_print_estatus(const char *pfx, 527static int ghes_print_estatus(const char *pfx,
@@ -711,26 +740,34 @@ static int ghes_notify_sci(struct notifier_block *this,
711 return ret; 740 return ret;
712} 741}
713 742
743static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
744{
745 struct llist_node *next, *tail = NULL;
746
747 while (llnode) {
748 next = llnode->next;
749 llnode->next = tail;
750 tail = llnode;
751 llnode = next;
752 }
753
754 return tail;
755}
756
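A small demonstration of the helper's semantics, assuming the <linux/llist.h> API: llist_add() pushes at the head, so llist_del_all() yields entries newest-first, and the reversal restores arrival order. All names below are illustrative:

	struct demo_node {
		int seq;
		struct llist_node llnode;
	};
	static LLIST_HEAD(demo_list);

	static void demo(void)
	{
		static struct demo_node a = { .seq = 1 }, b = { .seq = 2 };
		struct llist_node *n;

		llist_add(&a.llnode, &demo_list);	/* list: 1 */
		llist_add(&b.llnode, &demo_list);	/* list: 2 -> 1 */
		n = llist_nodes_reverse(llist_del_all(&demo_list));
		/* n now walks 1 -> 2, the order the events arrived */
	}
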
714static void ghes_proc_in_irq(struct irq_work *irq_work) 757static void ghes_proc_in_irq(struct irq_work *irq_work)
715{ 758{
716 struct llist_node *llnode, *next, *tail = NULL; 759 struct llist_node *llnode, *next;
717 struct ghes_estatus_node *estatus_node; 760 struct ghes_estatus_node *estatus_node;
718 struct acpi_hest_generic *generic; 761 struct acpi_hest_generic *generic;
719 struct acpi_hest_generic_status *estatus; 762 struct acpi_hest_generic_status *estatus;
720 u32 len, node_len; 763 u32 len, node_len;
721 764
765 llnode = llist_del_all(&ghes_estatus_llist);
722 /* 766 /*
723 * Because the time order of estatus in list is reversed, 767 * Because the time order of estatus in list is reversed,
724 * revert it back to proper order. 768 * revert it back to proper order.
725 */ 769 */
726 llnode = llist_del_all(&ghes_estatus_llist); 770 llnode = llist_nodes_reverse(llnode);
727 while (llnode) {
728 next = llnode->next;
729 llnode->next = tail;
730 tail = llnode;
731 llnode = next;
732 }
733 llnode = tail;
734 while (llnode) { 771 while (llnode) {
735 next = llnode->next; 772 next = llnode->next;
736 estatus_node = llist_entry(llnode, struct ghes_estatus_node, 773 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
@@ -750,6 +787,32 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
750 } 787 }
751} 788}
752 789
790static void ghes_print_queued_estatus(void)
791{
792 struct llist_node *llnode;
793 struct ghes_estatus_node *estatus_node;
794 struct acpi_hest_generic *generic;
795 struct acpi_hest_generic_status *estatus;
796 u32 len, node_len;
797
798 llnode = llist_del_all(&ghes_estatus_llist);
799 /*
800 * Because the time order of estatus in list is reversed,
801 * revert it back to proper order.
802 */
803 llnode = llist_nodes_reverse(llnode);
804 while (llnode) {
805 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
806 llnode);
807 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
808 len = apei_estatus_len(estatus);
809 node_len = GHES_ESTATUS_NODE_LEN(len);
810 generic = estatus_node->generic;
811 ghes_print_estatus(NULL, generic, estatus);
812 llnode = llnode->next;
813 }
814}
815
753static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) 816static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
754{ 817{
755 struct ghes *ghes, *ghes_global = NULL; 818 struct ghes *ghes, *ghes_global = NULL;
@@ -775,7 +838,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
775 838
776 if (sev_global >= GHES_SEV_PANIC) { 839 if (sev_global >= GHES_SEV_PANIC) {
777 oops_begin(); 840 oops_begin();
778 __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic, 841 ghes_print_queued_estatus();
842 __ghes_print_estatus(KERN_EMERG, ghes_global->generic,
779 ghes_global->estatus); 843 ghes_global->estatus);
780 /* reboot to log the error! */ 844 /* reboot to log the error! */
781 if (panic_timeout == 0) 845 if (panic_timeout == 0)
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index ee7fddc4665..7f00cf38098 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -221,10 +221,9 @@ void __init acpi_hest_init(void)
221 221
222 status = acpi_get_table(ACPI_SIG_HEST, 0, 222 status = acpi_get_table(ACPI_SIG_HEST, 0,
223 (struct acpi_table_header **)&hest_tab); 223 (struct acpi_table_header **)&hest_tab);
224 if (status == AE_NOT_FOUND) { 224 if (status == AE_NOT_FOUND)
225 pr_info(HEST_PFX "Table not found.\n");
226 goto err; 225 goto err;
227 } else if (ACPI_FAILURE(status)) { 226 else if (ACPI_FAILURE(status)) {
228 const char *msg = acpi_format_exception(status); 227 const char *msg = acpi_format_exception(status);
229 pr_err(HEST_PFX "Failed to get table, %s\n", msg); 228 pr_err(HEST_PFX "Failed to get table, %s\n", msg);
230 rc = -EINVAL; 229 rc = -EINVAL;
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index cfc0cc10af3..d4a5b3d3657 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -32,6 +32,8 @@
32#include <linux/rculist.h> 32#include <linux/rculist.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/mm.h>
36#include <linux/highmem.h>
35#include <acpi/atomicio.h> 37#include <acpi/atomicio.h>
36 38
37#define ACPI_PFX "ACPI: " 39#define ACPI_PFX "ACPI: "
@@ -97,6 +99,37 @@ static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
97 return NULL; 99 return NULL;
98} 100}
99 101
102#ifndef CONFIG_IA64
103#define should_use_kmap(pfn) page_is_ram(pfn)
104#else
105/* ioremap will take care of cache attributes */
106#define should_use_kmap(pfn) 0
107#endif
108
109static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
110{
111 unsigned long pfn;
112
113 pfn = pg_off >> PAGE_SHIFT;
114 if (should_use_kmap(pfn)) {
115 if (pg_sz > PAGE_SIZE)
116 return NULL;
117 return (void __iomem __force *)kmap(pfn_to_page(pfn));
118 } else
119 return ioremap(pg_off, pg_sz);
120}
121
122static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
123{
124 unsigned long pfn;
125
126 pfn = pg_off >> PAGE_SHIFT;
127 if (page_is_ram(pfn))
128 kunmap(pfn_to_page(pfn));
129 else
130 iounmap(vaddr);
131}
132
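The helpers above prefer kmap() for RAM pages because ioremap() on RAM could create a second mapping with conflicting cache attributes, and kmap() covers exactly one page, hence the pg_sz > PAGE_SIZE bail-out. A hedged usage sketch, where paddr is a hypothetical register address that fits inside one page:

phys_addr_t pg_off = paddr & PAGE_MASK;
void __iomem *base = acpi_map(pg_off, PAGE_SIZE);

if (base) {
	u32 v = readl(base + (paddr & ~PAGE_MASK));	/* offset within the page */
	acpi_unmap(pg_off, base);
}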
100/* 133/*
101 * Used to pre-map the specified IO memory area. First try to find 134 * Used to pre-map the specified IO memory area. First try to find
102 * whether the area is already pre-mapped, if it is, increase the 135 * whether the area is already pre-mapped, if it is, increase the
@@ -119,7 +152,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
119 152
120 pg_off = paddr & PAGE_MASK; 153 pg_off = paddr & PAGE_MASK;
121 pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off; 154 pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
122 vaddr = ioremap(pg_off, pg_sz); 155 vaddr = acpi_map(pg_off, pg_sz);
123 if (!vaddr) 156 if (!vaddr)
124 return NULL; 157 return NULL;
125 map = kmalloc(sizeof(*map), GFP_KERNEL); 158 map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -135,7 +168,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
135 vaddr = __acpi_try_ioremap(paddr, size); 168 vaddr = __acpi_try_ioremap(paddr, size);
136 if (vaddr) { 169 if (vaddr) {
137 spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 170 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
138 iounmap(map->vaddr); 171 acpi_unmap(pg_off, map->vaddr);
139 kfree(map); 172 kfree(map);
140 return vaddr; 173 return vaddr;
141 } 174 }
@@ -144,7 +177,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
144 177
145 return map->vaddr + (paddr - map->paddr); 178 return map->vaddr + (paddr - map->paddr);
146err_unmap: 179err_unmap:
147 iounmap(vaddr); 180 acpi_unmap(pg_off, vaddr);
148 return NULL; 181 return NULL;
149} 182}
150 183
@@ -177,7 +210,7 @@ static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
177 return; 210 return;
178 211
179 synchronize_rcu(); 212 synchronize_rcu();
180 iounmap(map->vaddr); 213 acpi_unmap(map->paddr, map->vaddr);
181 kfree(map); 214 kfree(map);
182} 215}
183 216
@@ -260,6 +293,21 @@ int acpi_post_unmap_gar(struct acpi_generic_address *reg)
260} 293}
261EXPORT_SYMBOL_GPL(acpi_post_unmap_gar); 294EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
262 295
296#ifdef readq
297static inline u64 read64(const volatile void __iomem *addr)
298{
299 return readq(addr);
300}
301#else
302static inline u64 read64(const volatile void __iomem *addr)
303{
304 u64 l, h;
305 l = readl(addr);
306 h = readl(addr+4);
307 return l | (h << 32);
308}
309#endif
310
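On a 32-bit build the fallback above assembles the value from two 32-bit reads. A worked example; note the pair is not atomic, so a device updating the register between the two readl() calls can yield a torn value:

u64 l = 0x89abcdefULL;		/* readl(addr)     */
u64 h = 0x01234567ULL;		/* readl(addr + 4) */
u64 v = l | (h << 32);		/* 0x0123456789abcdef */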
263/* 311/*
264 * Can be used in atomic (including NMI) or process context. RCU read 312 * Can be used in atomic (including NMI) or process context. RCU read
265 * lock can only be released after the IO memory area accessing. 313 * lock can only be released after the IO memory area accessing.
@@ -280,11 +328,9 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
280 case 32: 328 case 32:
281 *val = readl(addr); 329 *val = readl(addr);
282 break; 330 break;
283#ifdef readq
284 case 64: 331 case 64:
285 *val = readq(addr); 332 *val = read64(addr);
286 break; 333 break;
287#endif
288 default: 334 default:
289 return -EINVAL; 335 return -EINVAL;
290 } 336 }
@@ -293,6 +339,19 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
293 return 0; 339 return 0;
294} 340}
295 341
342#ifdef writeq
343static inline void write64(u64 val, volatile void __iomem *addr)
344{
345 writeq(val, addr);
346}
347#else
348static inline void write64(u64 val, volatile void __iomem *addr)
349{
350 writel(val, addr);
351 writel(val>>32, addr+4);
352}
353#endif
354
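The mirror-image fallback for writes; writel() takes a u32, so the first store implicitly truncates val to its low word:

u64 val = 0x0123456789abcdefULL;
u32 lo  = (u32)val;		/* 0x89abcdef, written to addr     */
u32 hi  = (u32)(val >> 32);	/* 0x01234567, written to addr + 4 */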
296static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width) 355static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
297{ 356{
298 void __iomem *addr; 357 void __iomem *addr;
@@ -309,11 +368,9 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
309 case 32: 368 case 32:
310 writel(val, addr); 369 writel(val, addr);
311 break; 370 break;
312#ifdef writeq
313 case 64: 371 case 64:
314 writeq(val, addr); 372 write64(val, addr);
315 break; 373 break;
316#endif
317 default: 374 default:
318 return -EINVAL; 375 return -EINVAL;
319 } 376 }
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 3b5c3189fd9..e56f3be7b07 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
45static int node_to_pxm_map[MAX_NUMNODES] 45static int node_to_pxm_map[MAX_NUMNODES]
46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
47 47
48unsigned char acpi_srat_revision __initdata;
49
48int pxm_to_node(int pxm) 50int pxm_to_node(int pxm)
49{ 51{
50 if (pxm < 0) 52 if (pxm < 0)
@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
255 257
256static int __init acpi_parse_srat(struct acpi_table_header *table) 258static int __init acpi_parse_srat(struct acpi_table_header *table)
257{ 259{
260 struct acpi_table_srat *srat;
258 if (!table) 261 if (!table)
259 return -EINVAL; 262 return -EINVAL;
260 263
264 srat = (struct acpi_table_srat *)table;
265 acpi_srat_revision = srat->header.revision;
266
261 /* Real work done in acpi_table_parse_srat below. */ 267 /* Real work done in acpi_table_parse_srat below. */
262 268
263 return 0; 269 return 0;
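A hedged sketch of why the revision is worth caching, modeled on the x86 SRAT parser: in a CPU affinity entry, the three proximity_domain_hi bytes are only meaningful from table revision 2 onward.

static int pxm_from_cpu_affinity(struct acpi_srat_cpu_affinity *pa)
{
	int pxm = pa->proximity_domain_lo;	/* 8-bit domain on rev 1 tables */

	if (acpi_srat_revision >= 2)
		pxm |= (pa->proximity_domain_hi[0] << 8) |
		       (pa->proximity_domain_hi[1] << 16) |
		       (pa->proximity_domain_hi[2] << 24);
	return pxm;
}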
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 096787b43c9..7a2035fa8c7 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -15,6 +15,56 @@
15#include <linux/acpi_io.h> 15#include <linux/acpi_io.h>
16#include <acpi/acpiosxf.h> 16#include <acpi/acpiosxf.h>
17 17
18/* ACPI NVS regions, APEI may use it */
19
20struct nvs_region {
21 __u64 phys_start;
22 __u64 size;
23 struct list_head node;
24};
25
26static LIST_HEAD(nvs_region_list);
27
28#ifdef CONFIG_ACPI_SLEEP
29static int suspend_nvs_register(unsigned long start, unsigned long size);
30#else
31static inline int suspend_nvs_register(unsigned long a, unsigned long b)
32{
33 return 0;
34}
35#endif
36
37int acpi_nvs_register(__u64 start, __u64 size)
38{
39 struct nvs_region *region;
40
41 region = kmalloc(sizeof(*region), GFP_KERNEL);
42 if (!region)
43 return -ENOMEM;
44 region->phys_start = start;
45 region->size = size;
46 list_add_tail(&region->node, &nvs_region_list);
47
48 return suspend_nvs_register(start, size);
49}
50
51int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
52 void *data)
53{
54 int rc;
55 struct nvs_region *region;
56
57 list_for_each_entry(region, &nvs_region_list, node) {
58 rc = func(region->phys_start, region->size, data);
59 if (rc)
60 return rc;
61 }
62
63 return 0;
64}
65
66
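A hedged usage sketch of the iterator above; the callback name and the accounting are hypothetical, and a non-zero return stops the walk:

static int count_nvs_bytes(__u64 start, __u64 size, void *data)
{
	unsigned long *total = data;

	*total += size;		/* e.g. account the region */
	return 0;
}

/* caller: */
unsigned long total = 0;
acpi_nvs_for_each_region(count_nvs_bytes, &total);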
67#ifdef CONFIG_ACPI_SLEEP
18/* 68/*
19 * Platforms, like ACPI, may want us to save some memory used by them during 69 * Platforms, like ACPI, may want us to save some memory used by them during
20 * suspend and to restore the contents of this memory during the subsequent 70 * suspend and to restore the contents of this memory during the subsequent
@@ -41,7 +91,7 @@ static LIST_HEAD(nvs_list);
41 * things so that the data from page-aligned addresses in this region will 91 * things so that the data from page-aligned addresses in this region will
42 * be copied into separate RAM pages. 92 * be copied into separate RAM pages.
43 */ 93 */
44int suspend_nvs_register(unsigned long start, unsigned long size) 94static int suspend_nvs_register(unsigned long start, unsigned long size)
45{ 95{
46 struct nvs_page *entry, *next; 96 struct nvs_page *entry, *next;
47 97
@@ -159,3 +209,4 @@ void suspend_nvs_restore(void)
159 if (entry->data) 209 if (entry->data)
160 memcpy(entry->kaddr, entry->data, entry->size); 210 memcpy(entry->kaddr, entry->data, entry->size);
161} 211}
212#endif
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f31c5c5f1b7..fcc12d842bc 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -83,19 +83,6 @@ static struct workqueue_struct *kacpi_notify_wq;
83struct workqueue_struct *kacpi_hotplug_wq; 83struct workqueue_struct *kacpi_hotplug_wq;
84EXPORT_SYMBOL(kacpi_hotplug_wq); 84EXPORT_SYMBOL(kacpi_hotplug_wq);
85 85
86struct acpi_res_list {
87 resource_size_t start;
88 resource_size_t end;
89 acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
 90 char name[5]; /* can only have a length of 4 chars, make use of this
 91 one instead of res->name, no need to kmalloc then */
92 struct list_head resource_list;
93 int count;
94};
95
96static LIST_HEAD(resource_list_head);
97static DEFINE_SPINLOCK(acpi_res_lock);
98
99/* 86/*
100 * This list of permanent mappings is for memory that may be accessed from 87 * This list of permanent mappings is for memory that may be accessed from
101 * interrupt context, where we can't do the ioremap(). 88 * interrupt context, where we can't do the ioremap().
@@ -166,17 +153,21 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
166 return supported; 153 return supported;
167} 154}
168 155
169static void __init acpi_request_region (struct acpi_generic_address *addr, 156static void __init acpi_request_region (struct acpi_generic_address *gas,
170 unsigned int length, char *desc) 157 unsigned int length, char *desc)
171{ 158{
172 if (!addr->address || !length) 159 u64 addr;
160
161 /* Handle possible alignment issues */
162 memcpy(&addr, &gas->address, sizeof(addr));
163 if (!addr || !length)
173 return; 164 return;
174 165
175 /* Resources are never freed */ 166 /* Resources are never freed */
176 if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO) 167 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
177 request_region(addr->address, length, desc); 168 request_region(addr, length, desc);
178 else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 169 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
179 request_mem_region(addr->address, length, desc); 170 request_mem_region(addr, length, desc);
180} 171}
181 172
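The memcpy() above exists because gas->address is a 64-bit member of a byte-packed ACPI structure and may be only 4-byte aligned; copying into an aligned local avoids a fault on architectures that trap unaligned 64-bit loads. A minimal sketch:

u64 addr;

memcpy(&addr, &gas->address, sizeof(addr));	/* byte-wise, alignment-safe */
/* use 'addr' rather than dereferencing the packed field directly */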
182static int __init acpi_reserve_resources(void) 173static int __init acpi_reserve_resources(void)
@@ -427,35 +418,42 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
427 __acpi_unmap_table(virt, size); 418 __acpi_unmap_table(virt, size);
428} 419}
429 420
430static int acpi_os_map_generic_address(struct acpi_generic_address *addr) 421int acpi_os_map_generic_address(struct acpi_generic_address *gas)
431{ 422{
423 u64 addr;
432 void __iomem *virt; 424 void __iomem *virt;
433 425
434 if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 426 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
435 return 0; 427 return 0;
436 428
437 if (!addr->address || !addr->bit_width) 429 /* Handle possible alignment issues */
430 memcpy(&addr, &gas->address, sizeof(addr));
431 if (!addr || !gas->bit_width)
438 return -EINVAL; 432 return -EINVAL;
439 433
440 virt = acpi_os_map_memory(addr->address, addr->bit_width / 8); 434 virt = acpi_os_map_memory(addr, gas->bit_width / 8);
441 if (!virt) 435 if (!virt)
442 return -EIO; 436 return -EIO;
443 437
444 return 0; 438 return 0;
445} 439}
440EXPORT_SYMBOL(acpi_os_map_generic_address);
446 441
447static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr) 442void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
448{ 443{
444 u64 addr;
449 struct acpi_ioremap *map; 445 struct acpi_ioremap *map;
450 446
451 if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 447 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
452 return; 448 return;
453 449
454 if (!addr->address || !addr->bit_width) 450 /* Handle possible alignment issues */
451 memcpy(&addr, &gas->address, sizeof(addr));
452 if (!addr || !gas->bit_width)
455 return; 453 return;
456 454
457 mutex_lock(&acpi_ioremap_lock); 455 mutex_lock(&acpi_ioremap_lock);
458 map = acpi_map_lookup(addr->address, addr->bit_width / 8); 456 map = acpi_map_lookup(addr, gas->bit_width / 8);
459 if (!map) { 457 if (!map) {
460 mutex_unlock(&acpi_ioremap_lock); 458 mutex_unlock(&acpi_ioremap_lock);
461 return; 459 return;
@@ -465,6 +463,7 @@ static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
465 463
466 acpi_os_map_cleanup(map); 464 acpi_os_map_cleanup(map);
467} 465}
466EXPORT_SYMBOL(acpi_os_unmap_generic_address);
468 467
469#ifdef ACPI_FUTURE_USAGE 468#ifdef ACPI_FUTURE_USAGE
470acpi_status 469acpi_status
@@ -1278,44 +1277,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1278 * drivers */ 1277 * drivers */
1279int acpi_check_resource_conflict(const struct resource *res) 1278int acpi_check_resource_conflict(const struct resource *res)
1280{ 1279{
1281 struct acpi_res_list *res_list_elem; 1280 acpi_adr_space_type space_id;
1282 int ioport = 0, clash = 0; 1281 acpi_size length;
1282 u8 warn = 0;
1283 int clash = 0;
1283 1284
1284 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1285 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1285 return 0; 1286 return 0;
1286 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) 1287 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1287 return 0; 1288 return 0;
1288 1289
1289 ioport = res->flags & IORESOURCE_IO; 1290 if (res->flags & IORESOURCE_IO)
1290 1291 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1291 spin_lock(&acpi_res_lock); 1292 else
1292 list_for_each_entry(res_list_elem, &resource_list_head, 1293 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1293 resource_list) {
1294 if (ioport && (res_list_elem->resource_type
1295 != ACPI_ADR_SPACE_SYSTEM_IO))
1296 continue;
1297 if (!ioport && (res_list_elem->resource_type
1298 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1299 continue;
1300 1294
1301 if (res->end < res_list_elem->start 1295 length = res->end - res->start + 1;
1302 || res_list_elem->end < res->start) 1296 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1303 continue; 1297 warn = 1;
1304 clash = 1; 1298 clash = acpi_check_address_range(space_id, res->start, length, warn);
1305 break;
1306 }
1307 spin_unlock(&acpi_res_lock);
1308 1299
1309 if (clash) { 1300 if (clash) {
1310 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1301 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1311 printk(KERN_WARNING "ACPI: resource %s %pR"
1312 " conflicts with ACPI region %s "
1313 "[%s 0x%zx-0x%zx]\n",
1314 res->name, res, res_list_elem->name,
1315 (res_list_elem->resource_type ==
1316 ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
1317 (size_t) res_list_elem->start,
1318 (size_t) res_list_elem->end);
1319 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1302 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1320 printk(KERN_NOTICE "ACPI: This conflict may" 1303 printk(KERN_NOTICE "ACPI: This conflict may"
1321 " cause random problems and system" 1304 " cause random problems and system"
@@ -1467,155 +1450,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1467 kmem_cache_free(cache, object); 1450 kmem_cache_free(cache, object);
1468 return (AE_OK); 1451 return (AE_OK);
1469} 1452}
1470
1471static inline int acpi_res_list_add(struct acpi_res_list *res)
1472{
1473 struct acpi_res_list *res_list_elem;
1474
1475 list_for_each_entry(res_list_elem, &resource_list_head,
1476 resource_list) {
1477
1478 if (res->resource_type == res_list_elem->resource_type &&
1479 res->start == res_list_elem->start &&
1480 res->end == res_list_elem->end) {
1481
1482 /*
1483 * The Region(addr,len) already exist in the list,
1484 * just increase the count
1485 */
1486
1487 res_list_elem->count++;
1488 return 0;
1489 }
1490 }
1491
1492 res->count = 1;
1493 list_add(&res->resource_list, &resource_list_head);
1494 return 1;
1495}
1496
1497static inline void acpi_res_list_del(struct acpi_res_list *res)
1498{
1499 struct acpi_res_list *res_list_elem;
1500
1501 list_for_each_entry(res_list_elem, &resource_list_head,
1502 resource_list) {
1503
1504 if (res->resource_type == res_list_elem->resource_type &&
1505 res->start == res_list_elem->start &&
1506 res->end == res_list_elem->end) {
1507
1508 /*
1509 * If the res count is decreased to 0,
1510 * remove and free it
1511 */
1512
1513 if (--res_list_elem->count == 0) {
1514 list_del(&res_list_elem->resource_list);
1515 kfree(res_list_elem);
1516 }
1517 return;
1518 }
1519 }
1520}
1521
1522acpi_status
1523acpi_os_invalidate_address(
1524 u8 space_id,
1525 acpi_physical_address address,
1526 acpi_size length)
1527{
1528 struct acpi_res_list res;
1529
1530 switch (space_id) {
1531 case ACPI_ADR_SPACE_SYSTEM_IO:
1532 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1533 /* Only interference checks against SystemIO and SystemMemory
1534 are needed */
1535 res.start = address;
1536 res.end = address + length - 1;
1537 res.resource_type = space_id;
1538 spin_lock(&acpi_res_lock);
1539 acpi_res_list_del(&res);
1540 spin_unlock(&acpi_res_lock);
1541 break;
1542 case ACPI_ADR_SPACE_PCI_CONFIG:
1543 case ACPI_ADR_SPACE_EC:
1544 case ACPI_ADR_SPACE_SMBUS:
1545 case ACPI_ADR_SPACE_CMOS:
1546 case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1547 case ACPI_ADR_SPACE_DATA_TABLE:
1548 case ACPI_ADR_SPACE_FIXED_HARDWARE:
1549 break;
1550 }
1551 return AE_OK;
1552}
1553
1554/******************************************************************************
1555 *
1556 * FUNCTION: acpi_os_validate_address
1557 *
1558 * PARAMETERS: space_id - ACPI space ID
1559 * address - Physical address
1560 * length - Address length
1561 *
1562 * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
1563 * should return AE_AML_ILLEGAL_ADDRESS.
1564 *
1565 * DESCRIPTION: Validate a system address via the host OS. Used to validate
1566 * the addresses accessed by AML operation regions.
1567 *
1568 *****************************************************************************/
1569
1570acpi_status
1571acpi_os_validate_address (
1572 u8 space_id,
1573 acpi_physical_address address,
1574 acpi_size length,
1575 char *name)
1576{
1577 struct acpi_res_list *res;
1578 int added;
1579 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1580 return AE_OK;
1581
1582 switch (space_id) {
1583 case ACPI_ADR_SPACE_SYSTEM_IO:
1584 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1585 /* Only interference checks against SystemIO and SystemMemory
1586 are needed */
1587 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1588 if (!res)
1589 return AE_OK;
1590 /* ACPI names are fixed to 4 bytes, still better use strlcpy */
1591 strlcpy(res->name, name, 5);
1592 res->start = address;
1593 res->end = address + length - 1;
1594 res->resource_type = space_id;
1595 spin_lock(&acpi_res_lock);
1596 added = acpi_res_list_add(res);
1597 spin_unlock(&acpi_res_lock);
1598 pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
1599 "name: %s\n", added ? "Added" : "Already exist",
1600 (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1601 ? "SystemIO" : "System Memory",
1602 (unsigned long long)res->start,
1603 (unsigned long long)res->end,
1604 res->name);
1605 if (!added)
1606 kfree(res);
1607 break;
1608 case ACPI_ADR_SPACE_PCI_CONFIG:
1609 case ACPI_ADR_SPACE_EC:
1610 case ACPI_ADR_SPACE_SMBUS:
1611 case ACPI_ADR_SPACE_CMOS:
1612 case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1613 case ACPI_ADR_SPACE_DATA_TABLE:
1614 case ACPI_ADR_SPACE_FIXED_HARDWARE:
1615 break;
1616 }
1617 return AE_OK;
1618}
1619#endif 1453#endif
1620 1454
1621acpi_status __init acpi_os_initialize(void) 1455acpi_status __init acpi_os_initialize(void)
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 3a0428e8435..c850de4c9a1 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
173 apic_id = map_mat_entry(handle, type, acpi_id); 173 apic_id = map_mat_entry(handle, type, acpi_id);
174 if (apic_id == -1) 174 if (apic_id == -1)
175 apic_id = map_madt_entry(type, acpi_id); 175 apic_id = map_madt_entry(type, acpi_id);
176 if (apic_id == -1) 176 if (apic_id == -1) {
177 return apic_id; 177 /*
 178 * On a UP processor, there is no _MAT or MADT table.
 179 * So the apic_id above is always set to -1.
 180 *
 181 * The BIOS may define multiple CPU handles even for a UP processor.
182 * For example,
183 *
184 * Scope (_PR)
185 * {
186 * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
187 * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
188 * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
189 * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
190 * }
191 *
 192 * Ignore apic_id and always return 0 for CPU0's handle.
 193 * Return -1 for any other CPU's handle.
194 */
195 if (acpi_id == 0)
196 return acpi_id;
197 else
198 return apic_id;
199 }
178 200
179#ifdef CONFIG_SMP 201#ifdef CONFIG_SMP
180 for_each_possible_cpu(i) { 202 for_each_possible_cpu(i) {
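Condensed, the UP fallback above amounts to: with no MADT/_MAT mapping available, treat only the first processor object as the boot CPU and leave every other handle unmapped.

if (apic_id == -1)
	return (acpi_id == 0) ? 0 : -1;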
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 20a68ca386d..0034ede3871 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -82,7 +82,7 @@ MODULE_LICENSE("GPL");
82static int acpi_processor_add(struct acpi_device *device); 82static int acpi_processor_add(struct acpi_device *device);
83static int acpi_processor_remove(struct acpi_device *device, int type); 83static int acpi_processor_remove(struct acpi_device *device, int type);
84static void acpi_processor_notify(struct acpi_device *device, u32 event); 84static void acpi_processor_notify(struct acpi_device *device, u32 event);
85static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); 85static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
86static int acpi_processor_handle_eject(struct acpi_processor *pr); 86static int acpi_processor_handle_eject(struct acpi_processor *pr);
87 87
88 88
@@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
324 * they are physically not present. 324 * they are physically not present.
325 */ 325 */
326 if (pr->id == -1) { 326 if (pr->id == -1) {
327 if (ACPI_FAILURE 327 if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
328 (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
329 return -ENODEV; 328 return -ENODEV;
330 }
331 } 329 }
332 /* 330 /*
333 * On some boxes several processors use the same processor bus id. 331 * On some boxes several processors use the same processor bus id.
@@ -539,6 +537,7 @@ err_thermal_unregister:
539 thermal_cooling_device_unregister(pr->cdev); 537 thermal_cooling_device_unregister(pr->cdev);
540err_power_exit: 538err_power_exit:
541 acpi_processor_power_exit(pr, device); 539 acpi_processor_power_exit(pr, device);
540 sysfs_remove_link(&device->dev.kobj, "sysdev");
542err_free_cpumask: 541err_free_cpumask:
543 free_cpumask_var(pr->throttling.shared_cpu_map); 542 free_cpumask_var(pr->throttling.shared_cpu_map);
544 543
@@ -720,18 +719,19 @@ processor_walk_namespace_cb(acpi_handle handle,
720 return (AE_OK); 719 return (AE_OK);
721} 720}
722 721
723static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) 722static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
724{ 723{
724 acpi_handle handle = pr->handle;
725 725
726 if (!is_processor_present(handle)) { 726 if (!is_processor_present(handle)) {
727 return AE_ERROR; 727 return AE_ERROR;
728 } 728 }
729 729
730 if (acpi_map_lsapic(handle, p_cpu)) 730 if (acpi_map_lsapic(handle, &pr->id))
731 return AE_ERROR; 731 return AE_ERROR;
732 732
733 if (arch_register_cpu(*p_cpu)) { 733 if (arch_register_cpu(pr->id)) {
734 acpi_unmap_lsapic(*p_cpu); 734 acpi_unmap_lsapic(pr->id);
735 return AE_ERROR; 735 return AE_ERROR;
736 } 736 }
737 737
@@ -748,7 +748,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
748 return (0); 748 return (0);
749} 749}
750#else 750#else
751static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) 751static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
752{ 752{
753 return AE_ERROR; 753 return AE_ERROR;
754} 754}
@@ -827,8 +827,6 @@ static void __exit acpi_processor_exit(void)
827 827
828 acpi_bus_unregister_driver(&acpi_processor_driver); 828 acpi_bus_unregister_driver(&acpi_processor_driver);
829 829
830 cpuidle_unregister_driver(&acpi_idle_driver);
831
832 return; 830 return;
833} 831}
834 832
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 69ac373c72a..fdf27b9fce4 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1117,6 +1117,13 @@ static int piix_broken_suspend(void)
1117 }, 1117 },
1118 }, 1118 },
1119 { 1119 {
1120 .ident = "Satellite Pro A120",
1121 .matches = {
1122 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1123 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
1124 },
1125 },
1126 {
1120 .ident = "Portege M500", 1127 .ident = "Portege M500",
1121 .matches = { 1128 .matches = {
1122 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1129 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 11c9aea4f4f..c06e0ec1155 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4125,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4125 * device and controller are SATA. 4125 * device and controller are SATA.
4126 */ 4126 */
4127 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4127 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4128 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4129 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4128 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4130 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4129 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4131 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4130 4132
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 9a7f0ea565d..74aaee30e26 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -291,6 +291,7 @@ int ata_tport_add(struct device *parent,
291 goto tport_err; 291 goto tport_err;
292 } 292 }
293 293
294 device_enable_async_suspend(dev);
294 pm_runtime_set_active(dev); 295 pm_runtime_set_active(dev);
295 pm_runtime_enable(dev); 296 pm_runtime_enable(dev);
296 297
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index d6a4677fdf7..1e65842e2ca 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20;
251static const u32 udma_tackmin = 20; 251static const u32 udma_tackmin = 20;
252static const u32 udma_tssmin = 50; 252static const u32 udma_tssmin = 50;
253 253
254#define BFIN_MAX_SG_SEGMENTS 4
255
254/** 256/**
255 * 257 *
256 * Function: num_clocks_min 258 * Function: num_clocks_min
@@ -829,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
829 831
830static void bfin_bmdma_setup(struct ata_queued_cmd *qc) 832static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
831{ 833{
832 unsigned short config = WDSIZE_16; 834 struct ata_port *ap = qc->ap;
835 struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
836 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
837 unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
833 struct scatterlist *sg; 838 struct scatterlist *sg;
834 unsigned int si; 839 unsigned int si;
840 unsigned int channel;
841 unsigned int dir;
842 unsigned int size = 0;
835 843
836 dev_dbg(qc->ap->dev, "in atapi dma setup\n"); 844 dev_dbg(qc->ap->dev, "in atapi dma setup\n");
837 /* Program the ATA_CTRL register with dir */ 845 /* Program the ATA_CTRL register with dir */
838 if (qc->tf.flags & ATA_TFLAG_WRITE) { 846 if (qc->tf.flags & ATA_TFLAG_WRITE) {
839 /* fill the ATAPI DMA controller */ 847 channel = CH_ATAPI_TX;
840 set_dma_config(CH_ATAPI_TX, config); 848 dir = DMA_TO_DEVICE;
841 set_dma_x_modify(CH_ATAPI_TX, 2);
842 for_each_sg(qc->sg, sg, qc->n_elem, si) {
843 set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
844 set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
845 }
846 } else { 849 } else {
850 channel = CH_ATAPI_RX;
851 dir = DMA_FROM_DEVICE;
847 config |= WNR; 852 config |= WNR;
848 /* fill the ATAPI DMA controller */
849 set_dma_config(CH_ATAPI_RX, config);
850 set_dma_x_modify(CH_ATAPI_RX, 2);
851 for_each_sg(qc->sg, sg, qc->n_elem, si) {
852 set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
853 set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
854 }
855 } 853 }
856}
857 854
858/** 855 dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
859 * bfin_bmdma_start - Start an IDE DMA transaction
860 * @qc: Info associated with this ATA transaction.
861 *
862 * Note: Original code is ata_bmdma_start().
863 */
864 856
865static void bfin_bmdma_start(struct ata_queued_cmd *qc) 857 /* fill the ATAPI DMA controller */
866{ 858 for_each_sg(qc->sg, sg, qc->n_elem, si) {
867 struct ata_port *ap = qc->ap; 859 dma_desc_cpu[si].start_addr = sg_dma_address(sg);
868 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 860 dma_desc_cpu[si].cfg = config;
869 struct scatterlist *sg; 861 dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
870 unsigned int si; 862 dma_desc_cpu[si].x_modify = 2;
863 size += sg_dma_len(sg);
864 }
871 865
872 dev_dbg(qc->ap->dev, "in atapi dma start\n"); 866 /* Set the last descriptor to stop mode */
873 if (!(ap->udma_mask || ap->mwdma_mask)) 867 dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
874 return;
875 868
876 /* start ATAPI DMA controller*/ 869 flush_dcache_range((unsigned int)dma_desc_cpu,
877 if (qc->tf.flags & ATA_TFLAG_WRITE) { 870 (unsigned int)dma_desc_cpu +
878 /* 871 qc->n_elem * sizeof(struct dma_desc_array));
879 * On blackfin arch, uncacheable memory is not
880 * allocated with flag GFP_DMA. DMA buffer from
 881 * common kernel code should be flushed if WB
882 * data cache is enabled. Otherwise, this loop
883 * is an empty loop and optimized out.
884 */
885 for_each_sg(qc->sg, sg, qc->n_elem, si) {
886 flush_dcache_range(sg_dma_address(sg),
887 sg_dma_address(sg) + sg_dma_len(sg));
888 }
889 enable_dma(CH_ATAPI_TX);
890 dev_dbg(qc->ap->dev, "enable udma write\n");
891 872
892 /* Send ATA DMA write command */ 873 /* Enable ATA DMA operation*/
893 bfin_exec_command(ap, &qc->tf); 874 set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
875 set_dma_x_count(channel, 0);
876 set_dma_x_modify(channel, 0);
877 set_dma_config(channel, config);
878
879 SSYNC();
880
881 /* Send ATA DMA command */
882 bfin_exec_command(ap, &qc->tf);
894 883
884 if (qc->tf.flags & ATA_TFLAG_WRITE) {
895 /* set ATA DMA write direction */ 885 /* set ATA DMA write direction */
896 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) 886 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
897 | XFER_DIR)); 887 | XFER_DIR));
898 } else { 888 } else {
899 enable_dma(CH_ATAPI_RX);
900 dev_dbg(qc->ap->dev, "enable udma read\n");
901
902 /* Send ATA DMA read command */
903 bfin_exec_command(ap, &qc->tf);
904
905 /* set ATA DMA read direction */ 889 /* set ATA DMA read direction */
906 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) 890 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
907 & ~XFER_DIR)); 891 & ~XFER_DIR));
@@ -913,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 913 /* Set ATAPI state machine control in terminate sequence */ 897 /* Set ATAPI state machine control in terminate sequence */
914 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM); 898 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
915 899
916 /* Set transfer length to buffer len */ 900 /* Set transfer length to the total size of sg buffers */
917 for_each_sg(qc->sg, sg, qc->n_elem, si) { 901 ATAPI_SET_XFER_LEN(base, size >> 1);
918 ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1)); 902}
919 }
920 903
921 /* Enable ATA DMA operation*/ 904/**
905 * bfin_bmdma_start - Start an IDE DMA transaction
906 * @qc: Info associated with this ATA transaction.
907 *
908 * Note: Original code is ata_bmdma_start().
909 */
910
911static void bfin_bmdma_start(struct ata_queued_cmd *qc)
912{
913 struct ata_port *ap = qc->ap;
914 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
915
916 dev_dbg(qc->ap->dev, "in atapi dma start\n");
917
918 if (!(ap->udma_mask || ap->mwdma_mask))
919 return;
920
921 /* start ATAPI transfer*/
922 if (ap->udma_mask) 922 if (ap->udma_mask)
923 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) 923 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
924 | ULTRA_START); 924 | ULTRA_START);
@@ -935,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
935static void bfin_bmdma_stop(struct ata_queued_cmd *qc) 935static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
936{ 936{
937 struct ata_port *ap = qc->ap; 937 struct ata_port *ap = qc->ap;
938 struct scatterlist *sg; 938 unsigned int dir;
939 unsigned int si;
940 939
941 dev_dbg(qc->ap->dev, "in atapi dma stop\n"); 940 dev_dbg(qc->ap->dev, "in atapi dma stop\n");
941
942 if (!(ap->udma_mask || ap->mwdma_mask)) 942 if (!(ap->udma_mask || ap->mwdma_mask))
943 return; 943 return;
944 944
945 /* stop ATAPI DMA controller*/ 945 /* stop ATAPI DMA controller*/
946 if (qc->tf.flags & ATA_TFLAG_WRITE) 946 if (qc->tf.flags & ATA_TFLAG_WRITE) {
947 dir = DMA_TO_DEVICE;
947 disable_dma(CH_ATAPI_TX); 948 disable_dma(CH_ATAPI_TX);
948 else { 949 } else {
950 dir = DMA_FROM_DEVICE;
949 disable_dma(CH_ATAPI_RX); 951 disable_dma(CH_ATAPI_RX);
950 if (ap->hsm_task_state & HSM_ST_LAST) {
951 /*
952 * On blackfin arch, uncacheable memory is not
953 * allocated with flag GFP_DMA. DMA buffer from
954 * common kenel code should be invalidated if
955 * data cache is enabled. Otherwise, this loop
956 * is an empty loop and optimized out.
957 */
958 for_each_sg(qc->sg, sg, qc->n_elem, si) {
959 invalidate_dcache_range(
960 sg_dma_address(sg),
961 sg_dma_address(sg)
962 + sg_dma_len(sg));
963 }
964 }
965 } 952 }
953
954 dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
966} 955}
967 956
968/** 957/**
@@ -1260,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap)
1260{ 1249{
1261 dev_dbg(ap->dev, "in atapi port stop\n"); 1250 dev_dbg(ap->dev, "in atapi port stop\n");
1262 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) { 1251 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
1252 dma_free_coherent(ap->dev,
1253 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1254 ap->bmdma_prd,
1255 ap->bmdma_prd_dma);
1256
1263 free_dma(CH_ATAPI_RX); 1257 free_dma(CH_ATAPI_RX);
1264 free_dma(CH_ATAPI_TX); 1258 free_dma(CH_ATAPI_TX);
1265 } 1259 }
@@ -1271,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap)
1271 if (!(ap->udma_mask || ap->mwdma_mask)) 1265 if (!(ap->udma_mask || ap->mwdma_mask))
1272 return 0; 1266 return 0;
1273 1267
1268 ap->bmdma_prd = dma_alloc_coherent(ap->dev,
1269 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1270 &ap->bmdma_prd_dma,
1271 GFP_KERNEL);
1272
1273 if (ap->bmdma_prd == NULL) {
1274 dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
1275 goto out;
1276 }
1277
1274 if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) { 1278 if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
1275 if (request_dma(CH_ATAPI_TX, 1279 if (request_dma(CH_ATAPI_TX,
1276 "BFIN ATAPI TX DMA") >= 0) 1280 "BFIN ATAPI TX DMA") >= 0)
1277 return 0; 1281 return 0;
1278 1282
1279 free_dma(CH_ATAPI_RX); 1283 free_dma(CH_ATAPI_RX);
1284 dma_free_coherent(ap->dev,
1285 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1286 ap->bmdma_prd,
1287 ap->bmdma_prd_dma);
1280 } 1288 }
1281 1289
1290out:
1282 ap->udma_mask = 0; 1291 ap->udma_mask = 0;
1283 ap->mwdma_mask = 0; 1292 ap->mwdma_mask = 0;
1284 dev_err(ap->dev, "Unable to request ATAPI DMA!" 1293 dev_err(ap->dev, "Unable to request ATAPI DMA!"
@@ -1400,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
1400 1409
1401static struct scsi_host_template bfin_sht = { 1410static struct scsi_host_template bfin_sht = {
1402 ATA_BASE_SHT(DRV_NAME), 1411 ATA_BASE_SHT(DRV_NAME),
1403 .sg_tablesize = SG_NONE, 1412 .sg_tablesize = BFIN_MAX_SG_SEGMENTS,
1404 .dma_boundary = ATA_DMA_BOUNDARY, 1413 .dma_boundary = ATA_DMA_BOUNDARY,
1405}; 1414};
1406 1415
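The rework above replaces per-segment register programming with a single descriptor-array DMA: each scatterlist segment gets one descriptor whose flow bits chain to the next, and the final descriptor's DMAFLOW/NDSIZE bits are cleared so the channel stops. Since WDSIZE_16 selects 16-bit transfers, x_count is the segment length in halfwords. A hedged sketch of the per-segment fill, using the dma_desc_array fields from the diff:

for_each_sg(qc->sg, sg, qc->n_elem, si) {
	dma_desc_cpu[si].start_addr = sg_dma_address(sg);
	dma_desc_cpu[si].cfg        = config;			/* DMAFLOW_ARRAY | NDSIZE_5 | ... */
	dma_desc_cpu[si].x_count    = sg_dma_len(sg) >> 1;	/* halfwords */
	dma_desc_cpu[si].x_modify   = 2;			/* 2-byte stride */
}
dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);	/* stop mode */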
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 5a2c95ba050..0120b0d1e9a 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -140,6 +140,7 @@ enum {
140 */ 140 */
141 HCONTROL_ONLINE_PHY_RST = (1 << 31), 141 HCONTROL_ONLINE_PHY_RST = (1 << 31),
142 HCONTROL_FORCE_OFFLINE = (1 << 30), 142 HCONTROL_FORCE_OFFLINE = (1 << 30),
143 HCONTROL_LEGACY = (1 << 28),
143 HCONTROL_PARITY_PROT_MOD = (1 << 14), 144 HCONTROL_PARITY_PROT_MOD = (1 << 14),
144 HCONTROL_DPATH_PARITY = (1 << 12), 145 HCONTROL_DPATH_PARITY = (1 << 12),
145 HCONTROL_SNOOP_ENABLE = (1 << 10), 146 HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host)
1223 * part of the port_start() callback 1224 * part of the port_start() callback
1224 */ 1225 */
1225 1226
1227 /* set the SATA controller to operate in enterprise mode */
1228 temp = ioread32(hcr_base + HCONTROL);
1229 iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
1230
1226 /* ack. any pending IRQs for this controller/port */ 1231 /* ack. any pending IRQs for this controller/port */
1227 temp = ioread32(hcr_base + HSTATUS); 1232 temp = ioread32(hcr_base + HSTATUS);
1228 if (temp & 0x3F) 1233 if (temp & 0x3F)
@@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op)
1421 /* Recover the CHBA register in host controller cmd register set */ 1426 /* Recover the CHBA register in host controller cmd register set */
1422 iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA); 1427 iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
1423 1428
1429 iowrite32((ioread32(hcr_base + HCONTROL)
1430 | HCONTROL_ONLINE_PHY_RST
1431 | HCONTROL_SNOOP_ENABLE
1432 | HCONTROL_PMP_ATTACHED),
1433 hcr_base + HCONTROL);
1434
1424 ata_host_resume(host); 1435 ata_host_resume(host);
1425 return 0; 1436 return 0;
1426} 1437}
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index fda56bde36b..0def898a1d1 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -19,6 +19,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
19 struct bcma_device *core_cc, 19 struct bcma_device *core_cc,
20 struct bcma_device *core_mips); 20 struct bcma_device *core_mips);
21#ifdef CONFIG_PM 21#ifdef CONFIG_PM
22int bcma_bus_suspend(struct bcma_bus *bus);
22int bcma_bus_resume(struct bcma_bus *bus); 23int bcma_bus_resume(struct bcma_bus *bus);
23#endif 24#endif
24 25
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 443b83a2fd7..f59244e3397 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -235,38 +235,32 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
235} 235}
236 236
237#ifdef CONFIG_PM 237#ifdef CONFIG_PM
238static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state) 238static int bcma_host_pci_suspend(struct device *dev)
239{ 239{
240 /* Host specific */ 240 struct pci_dev *pdev = to_pci_dev(dev);
241 pci_save_state(dev); 241 struct bcma_bus *bus = pci_get_drvdata(pdev);
242 pci_disable_device(dev);
243 pci_set_power_state(dev, pci_choose_state(dev, state));
244 242
245 return 0; 243 bus->mapped_core = NULL;
244
245 return bcma_bus_suspend(bus);
246} 246}
247 247
248static int bcma_host_pci_resume(struct pci_dev *dev) 248static int bcma_host_pci_resume(struct device *dev)
249{ 249{
250 struct bcma_bus *bus = pci_get_drvdata(dev); 250 struct pci_dev *pdev = to_pci_dev(dev);
251 int err; 251 struct bcma_bus *bus = pci_get_drvdata(pdev);
252 252
253 /* Host specific */ 253 return bcma_bus_resume(bus);
254 pci_set_power_state(dev, 0); 254}
255 err = pci_enable_device(dev);
256 if (err)
257 return err;
258 pci_restore_state(dev);
259 255
260 /* Bus specific */ 256static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
261 err = bcma_bus_resume(bus); 257 bcma_host_pci_resume);
262 if (err) 258#define BCMA_PM_OPS (&bcma_pm_ops)
263 return err;
264 259
265 return 0;
266}
267#else /* CONFIG_PM */ 260#else /* CONFIG_PM */
268# define bcma_host_pci_suspend NULL 261
269# define bcma_host_pci_resume NULL 262#define BCMA_PM_OPS NULL
263
270#endif /* CONFIG_PM */ 264#endif /* CONFIG_PM */
271 265
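SIMPLE_DEV_PM_OPS() above expands to a struct dev_pm_ops that routes both system-sleep paths to the same pair of callbacks, roughly:

static const struct dev_pm_ops bcma_pm_ops = {
	.suspend  = bcma_host_pci_suspend,
	.resume   = bcma_host_pci_resume,
	.freeze   = bcma_host_pci_suspend,	/* hibernation uses the same pair */
	.thaw     = bcma_host_pci_resume,
	.poweroff = bcma_host_pci_suspend,
	.restore  = bcma_host_pci_resume,
};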
272static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { 266static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
@@ -284,8 +278,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
284 .id_table = bcma_pci_bridge_tbl, 278 .id_table = bcma_pci_bridge_tbl,
285 .probe = bcma_host_pci_probe, 279 .probe = bcma_host_pci_probe,
286 .remove = bcma_host_pci_remove, 280 .remove = bcma_host_pci_remove,
287 .suspend = bcma_host_pci_suspend, 281 .driver.pm = BCMA_PM_OPS,
288 .resume = bcma_host_pci_resume,
289}; 282};
290 283
291int __init bcma_host_pci_init(void) 284int __init bcma_host_pci_init(void)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 10f92b371e5..febbc0a1222 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -241,6 +241,21 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
241} 241}
242 242
243#ifdef CONFIG_PM 243#ifdef CONFIG_PM
244int bcma_bus_suspend(struct bcma_bus *bus)
245{
246 struct bcma_device *core;
247
248 list_for_each_entry(core, &bus->cores, list) {
249 struct device_driver *drv = core->dev.driver;
250 if (drv) {
251 struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
252 if (adrv->suspend)
253 adrv->suspend(core);
254 }
255 }
256 return 0;
257}
258
244int bcma_bus_resume(struct bcma_bus *bus) 259int bcma_bus_resume(struct bcma_bus *bus)
245{ 260{
246 struct bcma_device *core; 261 struct bcma_device *core;
@@ -252,6 +267,15 @@ int bcma_bus_resume(struct bcma_bus *bus)
252 bcma_core_chipcommon_init(&bus->drv_cc); 267 bcma_core_chipcommon_init(&bus->drv_cc);
253 } 268 }
254 269
270 list_for_each_entry(core, &bus->cores, list) {
271 struct device_driver *drv = core->dev.driver;
272 if (drv) {
273 struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
274 if (adrv->resume)
275 adrv->resume(core);
276 }
277 }
278
255 return 0; 279 return 0;
256} 280}
257#endif 281#endif
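Both hooks use the same dispatch pattern: the generic device_driver is embedded in struct bcma_driver, so container_of() recovers the bus-specific ops, and the optional callback runs only when a driver is actually bound:

struct device_driver *drv = core->dev.driver;

if (drv) {
	struct bcma_driver *adrv =
		container_of(drv, struct bcma_driver, drv);

	if (adrv->suspend)
		adrv->suspend(core);
}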
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a30aa103f95..4e4c8a4a5fd 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -317,6 +317,17 @@ config BLK_DEV_NBD
317 317
318 If unsure, say N. 318 If unsure, say N.
319 319
320config BLK_DEV_NVME
321 tristate "NVM Express block device"
322 depends on PCI
323 ---help---
324 The NVM Express driver is for solid state drives directly
325 connected to the PCI or PCI Express bus. If you know you
326 don't have one of these, it is safe to answer N.
327
328 To compile this driver as a module, choose M here: the
329 module will be called nvme.
330
320config BLK_DEV_OSD 331config BLK_DEV_OSD
321 tristate "OSD object-as-blkdev support" 332 tristate "OSD object-as-blkdev support"
322 depends on SCSI_OSD_ULD 333 depends on SCSI_OSD_ULD
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ad7b74a44ef..5b795059f8f 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
23obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o 23obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
24obj-$(CONFIG_MG_DISK) += mg_disk.o 24obj-$(CONFIG_MG_DISK) += mg_disk.o
25obj-$(CONFIG_SUNVDC) += sunvdc.o 25obj-$(CONFIG_SUNVDC) += sunvdc.o
26obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
26obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o 27obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
27 28
28obj-$(CONFIG_BLK_DEV_UMEM) += umem.o 29obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644
index 00000000000..c1dc4d86c22
--- /dev/null
+++ b/drivers/block/nvme.c
@@ -0,0 +1,1739 @@
1/*
2 * NVM Express device driver
3 * Copyright (c) 2011, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/nvme.h>
20#include <linux/bio.h>
21#include <linux/bitops.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/errno.h>
25#include <linux/fs.h>
26#include <linux/genhd.h>
27#include <linux/idr.h>
28#include <linux/init.h>
29#include <linux/interrupt.h>
30#include <linux/io.h>
31#include <linux/kdev_t.h>
32#include <linux/kthread.h>
33#include <linux/kernel.h>
34#include <linux/mm.h>
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/pci.h>
38#include <linux/poison.h>
39#include <linux/sched.h>
40#include <linux/slab.h>
41#include <linux/types.h>
42#include <linux/version.h>
43
44#define NVME_Q_DEPTH 1024
45#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
46#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
47#define NVME_MINORS 64
48#define NVME_IO_TIMEOUT (5 * HZ)
49#define ADMIN_TIMEOUT (60 * HZ)
50
51static int nvme_major;
52module_param(nvme_major, int, 0);
53
54static int use_threaded_interrupts;
55module_param(use_threaded_interrupts, int, 0);
56
57static DEFINE_SPINLOCK(dev_list_lock);
58static LIST_HEAD(dev_list);
59static struct task_struct *nvme_thread;
60
61/*
62 * Represents an NVM Express device. Each nvme_dev is a PCI function.
63 */
64struct nvme_dev {
65 struct list_head node;
66 struct nvme_queue **queues;
67 u32 __iomem *dbs;
68 struct pci_dev *pci_dev;
69 struct dma_pool *prp_page_pool;
70 struct dma_pool *prp_small_pool;
71 int instance;
72 int queue_count;
73 int db_stride;
74 u32 ctrl_config;
75 struct msix_entry *entry;
76 struct nvme_bar __iomem *bar;
77 struct list_head namespaces;
78 char serial[20];
79 char model[40];
80 char firmware_rev[8];
81};
82
83/*
84 * An NVM Express namespace is equivalent to a SCSI LUN
85 */
86struct nvme_ns {
87 struct list_head list;
88
89 struct nvme_dev *dev;
90 struct request_queue *queue;
91 struct gendisk *disk;
92
93 int ns_id;
94 int lba_shift;
95};
96
97/*
98 * An NVM Express queue. Each device has at least two (one for admin
99 * commands and one for I/O commands).
100 */
101struct nvme_queue {
102 struct device *q_dmadev;
103 struct nvme_dev *dev;
104 spinlock_t q_lock;
105 struct nvme_command *sq_cmds;
106 volatile struct nvme_completion *cqes;
107 dma_addr_t sq_dma_addr;
108 dma_addr_t cq_dma_addr;
109 wait_queue_head_t sq_full;
110 wait_queue_t sq_cong_wait;
111 struct bio_list sq_cong;
112 u32 __iomem *q_db;
113 u16 q_depth;
114 u16 cq_vector;
115 u16 sq_head;
116 u16 sq_tail;
117 u16 cq_head;
118 u16 cq_phase;
119 unsigned long cmdid_data[];
120};
121
122/*
123 * Check we didn't inadvertently grow the command struct
124 */
125static inline void _nvme_check_size(void)
126{
127 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
128 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
129 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
130 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
131 BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
132 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
133 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
134 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
135 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
136}
137
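The BUILD_BUG_ON() checks are compile-time assertions: the on-wire command and identify structures must keep their ABI sizes. A minimal model of the classic trick:

#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
/* When 'cond' is true the array size is negative, which fails to compile. */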
138typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
139 struct nvme_completion *);
140
141struct nvme_cmd_info {
142 nvme_completion_fn fn;
143 void *ctx;
144 unsigned long timeout;
145};
146
147static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
148{
149 return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
150}
151
152/**
153 * alloc_cmdid() - Allocate a Command ID
154 * @nvmeq: The queue that will be used for this command
155 * @ctx: A pointer that will be passed to the handler
156 * @handler: The function to call on completion
157 *
158 * Allocate a Command ID for a queue. The data passed in will
159 * be passed to the completion handler. This is implemented by using
160 * the bottom two bits of the ctx pointer to store the handler ID.
161 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
162 * We can change this if it becomes a problem.
163 *
164 * May be called with local interrupts disabled and the q_lock held,
165 * or with interrupts enabled and no locks held.
166 */
167static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
168 nvme_completion_fn handler, unsigned timeout)
169{
170 int depth = nvmeq->q_depth - 1;
171 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
172 int cmdid;
173
174 do {
175 cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
176 if (cmdid >= depth)
177 return -EBUSY;
178 } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
179
180 info[cmdid].fn = handler;
181 info[cmdid].ctx = ctx;
182 info[cmdid].timeout = jiffies + timeout;
183 return cmdid;
184}
185
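A user-space model of the command-ID allocation (hypothetical, not kernel code): scan for the first clear bit, then claim it. The kernel version claims the slot with test_and_set_bit(), so a racing CPU simply retries the scan:

#include <stdio.h>

int main(void)
{
	unsigned long bitmap = 0x5;	/* slots 0 and 2 already in use */
	int depth = 3, cmdid;

	for (cmdid = 0; cmdid < depth; cmdid++)
		if (!(bitmap & (1UL << cmdid)))
			break;		/* first free slot */
	if (cmdid < depth)
		bitmap |= 1UL << cmdid;	/* claim it */
	printf("cmdid=%d bitmap=%#lx\n", cmdid, bitmap);	/* cmdid=1 bitmap=0x7 */
	return 0;
}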
186static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
187 nvme_completion_fn handler, unsigned timeout)
188{
189 int cmdid;
190 wait_event_killable(nvmeq->sq_full,
191 (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
192 return (cmdid < 0) ? -EINTR : cmdid;
193}
194
195/* Special values must be less than 0x1000 */
196#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
197#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
198#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
199#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
200#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
201
202static void special_completion(struct nvme_dev *dev, void *ctx,
203 struct nvme_completion *cqe)
204{
205 if (ctx == CMD_CTX_CANCELLED)
206 return;
207 if (ctx == CMD_CTX_FLUSH)
208 return;
209 if (ctx == CMD_CTX_COMPLETED) {
210 dev_warn(&dev->pci_dev->dev,
211 "completed id %d twice on queue %d\n",
212 cqe->command_id, le16_to_cpup(&cqe->sq_id));
213 return;
214 }
215 if (ctx == CMD_CTX_INVALID) {
216 dev_warn(&dev->pci_dev->dev,
217 "invalid id %d completed on queue %d\n",
218 cqe->command_id, le16_to_cpup(&cqe->sq_id));
219 return;
220 }
221
222 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
223}
224
225/*
226 * Called with local interrupts disabled and the q_lock held. May not sleep.
227 */
228static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
229 nvme_completion_fn *fn)
230{
231 void *ctx;
232 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
233
234 if (cmdid >= nvmeq->q_depth) {
235 *fn = special_completion;
236 return CMD_CTX_INVALID;
237 }
238 *fn = info[cmdid].fn;
239 ctx = info[cmdid].ctx;
240 info[cmdid].fn = special_completion;
241 info[cmdid].ctx = CMD_CTX_COMPLETED;
242 clear_bit(cmdid, nvmeq->cmdid_data);
243 wake_up(&nvmeq->sq_full);
244 return ctx;
245}
246
247static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
248 nvme_completion_fn *fn)
249{
250 void *ctx;
251 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
252 if (fn)
253 *fn = info[cmdid].fn;
254 ctx = info[cmdid].ctx;
255 info[cmdid].fn = special_completion;
256 info[cmdid].ctx = CMD_CTX_CANCELLED;
257 return ctx;
258}
259
260static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
261{
262 return dev->queues[get_cpu() + 1];
263}
264
265static void put_nvmeq(struct nvme_queue *nvmeq)
266{
267 put_cpu();
268}
269
270/**
271 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
272 * @nvmeq: The queue to use
273 * @cmd: The command to send
274 *
275 * Safe to use from interrupt context
276 */
277static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
278{
279 unsigned long flags;
280 u16 tail;
281 spin_lock_irqsave(&nvmeq->q_lock, flags);
282 tail = nvmeq->sq_tail;
283 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
284 if (++tail == nvmeq->q_depth)
285 tail = 0;
286 writel(tail, nvmeq->q_db);
287 nvmeq->sq_tail = tail;
288 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
289
290 return 0;
291}
292
293/*
294 * The nvme_iod describes the data in an I/O, including the list of PRP
295 * entries. You can't see it in this data structure because C doesn't let
296 * me express that. Use nvme_alloc_iod to ensure there's enough space
297 * allocated to store the PRP list.
298 */
299struct nvme_iod {
300 void *private; /* For the use of the submitter of the I/O */
301 int npages; /* In the PRP list. 0 means small pool in use */
302 int offset; /* Of PRP list */
303 int nents; /* Used in scatterlist */
304 int length; /* Of data, in bytes */
305 dma_addr_t first_dma;
306 struct scatterlist sg[0];
307};
308
309static __le64 **iod_list(struct nvme_iod *iod)
310{
311 return ((void *)iod) + iod->offset;
312}
313
314/*
315 * Will slightly overestimate the number of pages needed. This is OK
316 * as it only leads to a small amount of wasted memory for the lifetime of
317 * the I/O.
318 */
319static int nvme_npages(unsigned size)
320{
321 unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
322 return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
323}
324
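Worked numbers for nvme_npages() with PAGE_SIZE = 4096; the '+ PAGE_SIZE' term covers the worst-case starting offset, and the divisor PAGE_SIZE - 8 reserves each list page's last 8-byte slot for chaining:

/* size = 64 KiB:  nprps = (65536 + 4096) / 4096 = 17 entries
 *                 17 * 8 = 136 bytes            -> 1 PRP-list page
 * size = 2 MiB:   nprps = (2097152 + 4096) / 4096 = 513 entries
 *                 513 * 8 = 4104 bytes > 4088   -> 2 PRP-list pages
 */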
325static struct nvme_iod *
326nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
327{
328 struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
329 sizeof(__le64 *) * nvme_npages(nbytes) +
330 sizeof(struct scatterlist) * nseg, gfp);
331
332 if (iod) {
333 iod->offset = offsetof(struct nvme_iod, sg[nseg]);
334 iod->npages = -1;
335 iod->length = nbytes;
336 }
337
338 return iod;
339}
340
341static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
342{
343 const int last_prp = PAGE_SIZE / 8 - 1;
344 int i;
345 __le64 **list = iod_list(iod);
346 dma_addr_t prp_dma = iod->first_dma;
347
348 if (iod->npages == 0)
349 dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
350 for (i = 0; i < iod->npages; i++) {
351 __le64 *prp_list = list[i];
352 dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
353 dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
354 prp_dma = next_prp_dma;
355 }
356 kfree(iod);
357}
358
359static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
360{
361 struct nvme_queue *nvmeq = get_nvmeq(dev);
362 if (bio_list_empty(&nvmeq->sq_cong))
363 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
364 bio_list_add(&nvmeq->sq_cong, bio);
365 put_nvmeq(nvmeq);
366 wake_up_process(nvme_thread);
367}
368
369static void bio_completion(struct nvme_dev *dev, void *ctx,
370 struct nvme_completion *cqe)
371{
372 struct nvme_iod *iod = ctx;
373 struct bio *bio = iod->private;
374 u16 status = le16_to_cpup(&cqe->status) >> 1;
375
376 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
377 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
378 nvme_free_iod(dev, iod);
379 if (status) {
380 bio_endio(bio, -EIO);
381 } else if (bio->bi_vcnt > bio->bi_idx) {
382 requeue_bio(dev, bio);
383 } else {
384 bio_endio(bio, 0);
385 }
386}
387
388/* length is in bytes. gfp flags indicates whether we may sleep. */
389static int nvme_setup_prps(struct nvme_dev *dev,
390 struct nvme_common_command *cmd, struct nvme_iod *iod,
391 int total_len, gfp_t gfp)
392{
393 struct dma_pool *pool;
394 int length = total_len;
395 struct scatterlist *sg = iod->sg;
396 int dma_len = sg_dma_len(sg);
397 u64 dma_addr = sg_dma_address(sg);
398 int offset = offset_in_page(dma_addr);
399 __le64 *prp_list;
400 __le64 **list = iod_list(iod);
401 dma_addr_t prp_dma;
402 int nprps, i;
403
404 cmd->prp1 = cpu_to_le64(dma_addr);
405 length -= (PAGE_SIZE - offset);
406 if (length <= 0)
407 return total_len;
408
409 dma_len -= (PAGE_SIZE - offset);
410 if (dma_len) {
411 dma_addr += (PAGE_SIZE - offset);
412 } else {
413 sg = sg_next(sg);
414 dma_addr = sg_dma_address(sg);
415 dma_len = sg_dma_len(sg);
416 }
417
418 if (length <= PAGE_SIZE) {
419 cmd->prp2 = cpu_to_le64(dma_addr);
420 return total_len;
421 }
422
423 nprps = DIV_ROUND_UP(length, PAGE_SIZE);
424 if (nprps <= (256 / 8)) {
425 pool = dev->prp_small_pool;
426 iod->npages = 0;
427 } else {
428 pool = dev->prp_page_pool;
429 iod->npages = 1;
430 }
431
432 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
433 if (!prp_list) {
434 cmd->prp2 = cpu_to_le64(dma_addr);
435 iod->npages = -1;
436 return (total_len - length) + PAGE_SIZE;
437 }
438 list[0] = prp_list;
439 iod->first_dma = prp_dma;
440 cmd->prp2 = cpu_to_le64(prp_dma);
441 i = 0;
442 for (;;) {
443 if (i == PAGE_SIZE / 8) {
444 __le64 *old_prp_list = prp_list;
445 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
446 if (!prp_list)
447 return total_len - length;
448 list[iod->npages++] = prp_list;
449 prp_list[0] = old_prp_list[i - 1];
450 old_prp_list[i - 1] = cpu_to_le64(prp_dma);
451 i = 1;
452 }
453 prp_list[i++] = cpu_to_le64(dma_addr);
454 dma_len -= PAGE_SIZE;
455 dma_addr += PAGE_SIZE;
456 length -= PAGE_SIZE;
457 if (length <= 0)
458 break;
459 if (dma_len > 0)
460 continue;
461 BUG_ON(dma_len < 0);
462 sg = sg_next(sg);
463 dma_addr = sg_dma_address(sg);
464 dma_len = sg_dma_len(sg);
465 }
466
467 return total_len;
468}
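/*
 * A sketch of the layout nvme_setup_prps() builds for a transfer that
 * needs a chained list (assuming 4 KiB pages):
 *
 *	prp1 -> first data page (may start at an offset)
 *	prp2 -> PRP list page: [data page 2][data page 3]...[chain]
 *
 * When a list page fills, its final 8-byte slot is repurposed as the
 * DMA address of a freshly allocated list page and the displaced
 * entry moves to slot 0 of the new page -- that is what the
 * i == PAGE_SIZE / 8 branch above is doing.
 */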
469
470/* NVMe scatterlists require no holes in the virtual addresses */
471#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
472 (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
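/*
 * Rationale: every PRP entry after the first must address a whole,
 * page-aligned page, so two segments that are not physically
 * contiguous can only follow each other if the first ends exactly on
 * a page boundary and the second starts at offset 0.  A 2 KiB segment
 * ending mid-page followed by data in another page cannot be
 * expressed; nvme_map_bio() below stops mapping at that point and the
 * remainder of the bio is requeued via bio_completion().
 */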
473
474static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
475 struct bio *bio, enum dma_data_direction dma_dir, int psegs)
476{
477 struct bio_vec *bvec, *bvprv = NULL;
478 struct scatterlist *sg = NULL;
479 int i, old_idx, length = 0, nsegs = 0;
480
481 sg_init_table(iod->sg, psegs);
482 old_idx = bio->bi_idx;
483 bio_for_each_segment(bvec, bio, i) {
484 if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
485 sg->length += bvec->bv_len;
486 } else {
487 if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
488 break;
489 sg = sg ? sg + 1 : iod->sg;
490 sg_set_page(sg, bvec->bv_page, bvec->bv_len,
491 bvec->bv_offset);
492 nsegs++;
493 }
494 length += bvec->bv_len;
495 bvprv = bvec;
496 }
497 bio->bi_idx = i;
498 iod->nents = nsegs;
499 sg_mark_end(sg);
500 if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
501 bio->bi_idx = old_idx;
502 return -ENOMEM;
503 }
504 return length;
505}
506
507static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
508 int cmdid)
509{
510 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
511
512 memset(cmnd, 0, sizeof(*cmnd));
513 cmnd->common.opcode = nvme_cmd_flush;
514 cmnd->common.command_id = cmdid;
515 cmnd->common.nsid = cpu_to_le32(ns->ns_id);
516
517 if (++nvmeq->sq_tail == nvmeq->q_depth)
518 nvmeq->sq_tail = 0;
519 writel(nvmeq->sq_tail, nvmeq->q_db);
520
521 return 0;
522}
523
524static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
525{
526 int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
527 special_completion, NVME_IO_TIMEOUT);
528 if (unlikely(cmdid < 0))
529 return cmdid;
530
531 return nvme_submit_flush(nvmeq, ns, cmdid);
532}
533
534/*
535 * Called with local interrupts disabled and the q_lock held. May not sleep.
536 */
537static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
538 struct bio *bio)
539{
540 struct nvme_command *cmnd;
541 struct nvme_iod *iod;
542 enum dma_data_direction dma_dir;
543 int cmdid, length, result = -ENOMEM;
544 u16 control;
545 u32 dsmgmt;
546 int psegs = bio_phys_segments(ns->queue, bio);
547
548 if ((bio->bi_rw & REQ_FLUSH) && psegs) {
549 result = nvme_submit_flush_data(nvmeq, ns);
550 if (result)
551 return result;
552 }
553
554 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
555 if (!iod)
556 goto nomem;
557 iod->private = bio;
558
559 result = -EBUSY;
560 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
561 if (unlikely(cmdid < 0))
562 goto free_iod;
563
564 if ((bio->bi_rw & REQ_FLUSH) && !psegs)
565 return nvme_submit_flush(nvmeq, ns, cmdid);
566
567 control = 0;
568 if (bio->bi_rw & REQ_FUA)
569 control |= NVME_RW_FUA;
570 if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
571 control |= NVME_RW_LR;
572
573 dsmgmt = 0;
574 if (bio->bi_rw & REQ_RAHEAD)
575 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
576
577 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
578
579 memset(cmnd, 0, sizeof(*cmnd));
580 if (bio_data_dir(bio)) {
581 cmnd->rw.opcode = nvme_cmd_write;
582 dma_dir = DMA_TO_DEVICE;
583 } else {
584 cmnd->rw.opcode = nvme_cmd_read;
585 dma_dir = DMA_FROM_DEVICE;
586 }
587
588 result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
589 if (result < 0)
590 goto free_iod;
591 length = result;
592
593 cmnd->rw.command_id = cmdid;
594 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
595 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
596 GFP_ATOMIC);
597 cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
598 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
599 cmnd->rw.control = cpu_to_le16(control);
600 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
601
602 bio->bi_sector += length >> 9;
603
604 if (++nvmeq->sq_tail == nvmeq->q_depth)
605 nvmeq->sq_tail = 0;
606 writel(nvmeq->sq_tail, nvmeq->q_db);
607
608 return 0;
609
610 free_iod:
611 nvme_free_iod(nvmeq->dev, iod);
612 nomem:
613 return result;
614}
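/*
 * The slba conversion above is plain shift arithmetic: bi_sector
 * counts 512-byte sectors, so on a namespace formatted with 4 KiB
 * LBAs (lba_shift == 12) sector 24 becomes slba = 24 >> (12 - 9) = 3,
 * and a 16 KiB bio yields length = (16384 >> 12) - 1 = 3, i.e. a
 * zero-based count of four blocks.
 */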
615
616static void nvme_make_request(struct request_queue *q, struct bio *bio)
617{
618 struct nvme_ns *ns = q->queuedata;
619 struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
620 int result = -EBUSY;
621
622 spin_lock_irq(&nvmeq->q_lock);
623 if (bio_list_empty(&nvmeq->sq_cong))
624 result = nvme_submit_bio_queue(nvmeq, ns, bio);
625 if (unlikely(result)) {
626 if (bio_list_empty(&nvmeq->sq_cong))
627 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
628 bio_list_add(&nvmeq->sq_cong, bio);
629 }
630
631 spin_unlock_irq(&nvmeq->q_lock);
632 put_nvmeq(nvmeq);
633}
634
635static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
636{
637 u16 head, phase;
638
639 head = nvmeq->cq_head;
640 phase = nvmeq->cq_phase;
641
642 for (;;) {
643 void *ctx;
644 nvme_completion_fn fn;
645 struct nvme_completion cqe = nvmeq->cqes[head];
646 if ((le16_to_cpu(cqe.status) & 1) != phase)
647 break;
648 nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
649 if (++head == nvmeq->q_depth) {
650 head = 0;
651 phase = !phase;
652 }
653
654 ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
655 fn(nvmeq->dev, ctx, &cqe);
656 }
657
658 /* If the controller ignores the cq head doorbell and continuously
659 * writes to the queue, it is theoretically possible to wrap around
660 * the queue twice and mistakenly return IRQ_NONE. Linux only
661 * requires that 0.1% of your interrupts are handled, so this isn't
662 * a big problem.
663 */
664 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
665 return IRQ_NONE;
666
667 writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
668 nvmeq->cq_head = head;
669 nvmeq->cq_phase = phase;
670
671 return IRQ_HANDLED;
672}
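/*
 * A short trace of the phase-bit protocol above, with q_depth == 4:
 * the CQ starts zeroed and cq_phase starts at 1, so only entries the
 * controller has written with bit 0 set are consumed on the first
 * lap.  Once head wraps past entry 3 the expected phase flips to 0,
 * matching the controller's second lap and turning the leftover
 * first-lap entries into the stale ones.  No CQ register read is
 * needed to discover new completions.
 */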
673
674static irqreturn_t nvme_irq(int irq, void *data)
675{
676 irqreturn_t result;
677 struct nvme_queue *nvmeq = data;
678 spin_lock(&nvmeq->q_lock);
679 result = nvme_process_cq(nvmeq);
680 spin_unlock(&nvmeq->q_lock);
681 return result;
682}
683
684static irqreturn_t nvme_irq_check(int irq, void *data)
685{
686 struct nvme_queue *nvmeq = data;
687 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
688 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
689 return IRQ_NONE;
690 return IRQ_WAKE_THREAD;
691}
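/*
 * With use_threaded_interrupts set, queue_request_irq() below installs
 * this as the hard-irq handler: it merely peeks at the next CQ entry's
 * phase bit, and IRQ_WAKE_THREAD defers the real work to nvme_irq()
 * running in a kernel thread, keeping the dismissal of shared-line
 * interrupts that aren't ours cheap.
 */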
692
693static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
694{
695 spin_lock_irq(&nvmeq->q_lock);
696 cancel_cmdid(nvmeq, cmdid, NULL);
697 spin_unlock_irq(&nvmeq->q_lock);
698}
699
700struct sync_cmd_info {
701 struct task_struct *task;
702 u32 result;
703 int status;
704};
705
706static void sync_completion(struct nvme_dev *dev, void *ctx,
707 struct nvme_completion *cqe)
708{
709 struct sync_cmd_info *cmdinfo = ctx;
710 cmdinfo->result = le32_to_cpup(&cqe->result);
711 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
712 wake_up_process(cmdinfo->task);
713}
714
715/*
716 * Returns 0 on success. If the result is negative, it's a Linux error code;
717 * if the result is positive, it's an NVM Express status code
718 */
719static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
720 struct nvme_command *cmd, u32 *result, unsigned timeout)
721{
722 int cmdid;
723 struct sync_cmd_info cmdinfo;
724
725 cmdinfo.task = current;
726 cmdinfo.status = -EINTR;
727
728 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
729 timeout);
730 if (cmdid < 0)
731 return cmdid;
732 cmd->common.command_id = cmdid;
733
734 set_current_state(TASK_KILLABLE);
735 nvme_submit_cmd(nvmeq, cmd);
736 schedule();
737
738 if (cmdinfo.status == -EINTR) {
739 nvme_abort_command(nvmeq, cmdid);
740 return -EINTR;
741 }
742
743 if (result)
744 *result = cmdinfo.result;
745
746 return cmdinfo.status;
747}
748
749static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
750 u32 *result)
751{
752 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
753}
754
755static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
756{
757 int status;
758 struct nvme_command c;
759
760 memset(&c, 0, sizeof(c));
761 c.delete_queue.opcode = opcode;
762 c.delete_queue.qid = cpu_to_le16(id);
763
764 status = nvme_submit_admin_cmd(dev, &c, NULL);
765 if (status)
766 return -EIO;
767 return 0;
768}
769
770static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
771 struct nvme_queue *nvmeq)
772{
773 int status;
774 struct nvme_command c;
775 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
776
777 memset(&c, 0, sizeof(c));
778 c.create_cq.opcode = nvme_admin_create_cq;
779 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
780 c.create_cq.cqid = cpu_to_le16(qid);
781 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
782 c.create_cq.cq_flags = cpu_to_le16(flags);
783 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
784
785 status = nvme_submit_admin_cmd(dev, &c, NULL);
786 if (status)
787 return -EIO;
788 return 0;
789}
790
791static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
792 struct nvme_queue *nvmeq)
793{
794 int status;
795 struct nvme_command c;
796 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
797
798 memset(&c, 0, sizeof(c));
799 c.create_sq.opcode = nvme_admin_create_sq;
800 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
801 c.create_sq.sqid = cpu_to_le16(qid);
802 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
803 c.create_sq.sq_flags = cpu_to_le16(flags);
804 c.create_sq.cqid = cpu_to_le16(qid);
805
806 status = nvme_submit_admin_cmd(dev, &c, NULL);
807 if (status)
808 return -EIO;
809 return 0;
810}
811
812static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
813{
814 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
815}
816
817static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
818{
819 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
820}
821
822static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
823 dma_addr_t dma_addr)
824{
825 struct nvme_command c;
826
827 memset(&c, 0, sizeof(c));
828 c.identify.opcode = nvme_admin_identify;
829 c.identify.nsid = cpu_to_le32(nsid);
830 c.identify.prp1 = cpu_to_le64(dma_addr);
831 c.identify.cns = cpu_to_le32(cns);
832
833 return nvme_submit_admin_cmd(dev, &c, NULL);
834}
835
836static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
837 unsigned dword11, dma_addr_t dma_addr)
838{
839 struct nvme_command c;
840
841 memset(&c, 0, sizeof(c));
842 c.features.opcode = nvme_admin_get_features;
843 c.features.prp1 = cpu_to_le64(dma_addr);
844 c.features.fid = cpu_to_le32(fid);
845 c.features.dword11 = cpu_to_le32(dword11);
846
847 return nvme_submit_admin_cmd(dev, &c, NULL);
848}
849
850static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
851 unsigned dword11, dma_addr_t dma_addr, u32 *result)
852{
853 struct nvme_command c;
854
855 memset(&c, 0, sizeof(c));
856 c.features.opcode = nvme_admin_set_features;
857 c.features.prp1 = cpu_to_le64(dma_addr);
858 c.features.fid = cpu_to_le32(fid);
859 c.features.dword11 = cpu_to_le32(dword11);
860
861 return nvme_submit_admin_cmd(dev, &c, result);
862}
863
864static void nvme_free_queue(struct nvme_dev *dev, int qid)
865{
866 struct nvme_queue *nvmeq = dev->queues[qid];
867 int vector = dev->entry[nvmeq->cq_vector].vector;
868
869 irq_set_affinity_hint(vector, NULL);
870 free_irq(vector, nvmeq);
871
872 /* Don't tell the adapter to delete the admin queue */
873 if (qid) {
874 adapter_delete_sq(dev, qid);
875 adapter_delete_cq(dev, qid);
876 }
877
878 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
879 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
880 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
881 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
882 kfree(nvmeq);
883}
884
885static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
886 int depth, int vector)
887{
888 struct device *dmadev = &dev->pci_dev->dev;
889 unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
890 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
891 if (!nvmeq)
892 return NULL;
893
894 nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
895 &nvmeq->cq_dma_addr, GFP_KERNEL);
896 if (!nvmeq->cqes)
897 goto free_nvmeq;
898 memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
899
900 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
901 &nvmeq->sq_dma_addr, GFP_KERNEL);
902 if (!nvmeq->sq_cmds)
903 goto free_cqdma;
904
905 nvmeq->q_dmadev = dmadev;
906 nvmeq->dev = dev;
907 spin_lock_init(&nvmeq->q_lock);
908 nvmeq->cq_head = 0;
909 nvmeq->cq_phase = 1;
910 init_waitqueue_head(&nvmeq->sq_full);
911 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
912 bio_list_init(&nvmeq->sq_cong);
913 nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
914 nvmeq->q_depth = depth;
915 nvmeq->cq_vector = vector;
916
917 return nvmeq;
918
919 free_cqdma:
920 dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
921 nvmeq->cq_dma_addr);
922 free_nvmeq:
923 kfree(nvmeq);
924 return NULL;
925}
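/*
 * Doorbell addressing, worked through for the common db_stride == 0
 * case: q_db = &dev->dbs[qid << 1], so queue 0's submission doorbell
 * is dbs[0] and queue 1's is dbs[2], with each queue's completion
 * doorbell 1 << db_stride slots higher (see nvme_process_cq()).  A
 * nonzero CAP.DSTRD just spreads the pairs further apart.
 */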
926
927static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
928 const char *name)
929{
930 if (use_threaded_interrupts)
931 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
932 nvme_irq_check, nvme_irq,
933 IRQF_DISABLED | IRQF_SHARED,
934 name, nvmeq);
935 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
936 IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
937}
938
939static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
940 int qid, int cq_size, int vector)
941{
942 int result;
943 struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
944
945 if (!nvmeq)
946 return ERR_PTR(-ENOMEM);
947
948 result = adapter_alloc_cq(dev, qid, nvmeq);
949 if (result < 0)
950 goto free_nvmeq;
951
952 result = adapter_alloc_sq(dev, qid, nvmeq);
953 if (result < 0)
954 goto release_cq;
955
956 result = queue_request_irq(dev, nvmeq, "nvme");
957 if (result < 0)
958 goto release_sq;
959
960 return nvmeq;
961
962 release_sq:
963 adapter_delete_sq(dev, qid);
964 release_cq:
965 adapter_delete_cq(dev, qid);
966 free_nvmeq:
967 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
968 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
969 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
970 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
971 kfree(nvmeq);
972 return ERR_PTR(result);
973}
974
975static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
976{
977 int result;
978 u32 aqa;
979 u64 cap;
980 unsigned long timeout;
981 struct nvme_queue *nvmeq;
982
983 dev->dbs = ((void __iomem *)dev->bar) + 4096;
984
985 nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
986 if (!nvmeq)
987 return -ENOMEM;
988
989 aqa = nvmeq->q_depth - 1;
990 aqa |= aqa << 16;
991
992 dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
993 dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
994 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
995 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
996
997 writel(0, &dev->bar->cc);
998 writel(aqa, &dev->bar->aqa);
999 writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
1000 writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
1001 writel(dev->ctrl_config, &dev->bar->cc);
1002
1003 cap = readq(&dev->bar->cap);
1004 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1005 dev->db_stride = NVME_CAP_STRIDE(cap);
1006
1007 while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
1008 msleep(100);
1009 if (fatal_signal_pending(current))
1010 return -EINTR;
1011 if (time_after(jiffies, timeout)) {
1012 dev_err(&dev->pci_dev->dev,
1013 "Device not ready; aborting initialisation\n");
1014 return -ENODEV;
1015 }
1016 }
1017
1018 result = queue_request_irq(dev, nvmeq, "nvme admin");
1019 dev->queues[0] = nvmeq;
1020 return result;
1021}
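/*
 * The AQA write above packs both admin queue sizes, zero based, into
 * one register: with q_depth == 64, aqa = 63 | (63 << 16), i.e.
 * 0x003f003f (ASQS in the low half, ACQS in the high half).  ASQ and
 * ACQ then take the 64-bit ring addresses before CC.EN is raised.
 */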
1022
1023static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1024 unsigned long addr, unsigned length)
1025{
1026 int i, err, count, nents, offset;
1027 struct scatterlist *sg;
1028 struct page **pages;
1029 struct nvme_iod *iod;
1030
1031 if (addr & 3)
1032 return ERR_PTR(-EINVAL);
1033 if (!length)
1034 return ERR_PTR(-EINVAL);
1035
1036 offset = offset_in_page(addr);
1037 count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1038	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);
1039
1040 err = get_user_pages_fast(addr, count, 1, pages);
1041 if (err < count) {
1042 count = err;
1043 err = -EFAULT;
1044 goto put_pages;
1045 }
1046
1047	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	if (!iod) {
		err = -ENOMEM;
		goto put_pages;
	}
1048	sg = iod->sg;
1049 sg_init_table(sg, count);
1050 for (i = 0; i < count; i++) {
1051 sg_set_page(&sg[i], pages[i],
1052 min_t(int, length, PAGE_SIZE - offset), offset);
1053 length -= (PAGE_SIZE - offset);
1054 offset = 0;
1055 }
1056 sg_mark_end(&sg[i - 1]);
1057 iod->nents = count;
1058
1059 err = -ENOMEM;
1060 nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
1061 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1062 if (!nents)
1063 goto free_iod;
1064
1065 kfree(pages);
1066 return iod;
1067
1068 free_iod:
1069 kfree(iod);
1070 put_pages:
1071 for (i = 0; i < count; i++)
1072 put_page(pages[i]);
1073 kfree(pages);
1074 return ERR_PTR(err);
1075}
1076
1077static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
1078 struct nvme_iod *iod)
1079{
1080 int i;
1081
1082 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
1083 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1084
1085 for (i = 0; i < iod->nents; i++)
1086 put_page(sg_page(&iod->sg[i]));
1087}
1088
1089static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1090{
1091 struct nvme_dev *dev = ns->dev;
1092 struct nvme_queue *nvmeq;
1093 struct nvme_user_io io;
1094 struct nvme_command c;
1095 unsigned length;
1096 int status;
1097 struct nvme_iod *iod;
1098
1099 if (copy_from_user(&io, uio, sizeof(io)))
1100 return -EFAULT;
1101 length = (io.nblocks + 1) << ns->lba_shift;
1102
1103 switch (io.opcode) {
1104 case nvme_cmd_write:
1105 case nvme_cmd_read:
1106 case nvme_cmd_compare:
1107 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
1108 break;
1109 default:
1110 return -EINVAL;
1111 }
1112
1113 if (IS_ERR(iod))
1114 return PTR_ERR(iod);
1115
1116 memset(&c, 0, sizeof(c));
1117 c.rw.opcode = io.opcode;
1118 c.rw.flags = io.flags;
1119 c.rw.nsid = cpu_to_le32(ns->ns_id);
1120 c.rw.slba = cpu_to_le64(io.slba);
1121 c.rw.length = cpu_to_le16(io.nblocks);
1122 c.rw.control = cpu_to_le16(io.control);
1123	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1124	c.rw.reftag = cpu_to_le32(io.reftag);
1125	c.rw.apptag = cpu_to_le16(io.apptag);
1126	c.rw.appmask = cpu_to_le16(io.appmask);
1127 /* XXX: metadata */
1128 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
1129
1130 nvmeq = get_nvmeq(dev);
1131 /*
1132 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
1133 * disabled. We may be preempted at any point, and be rescheduled
1134 * to a different CPU. That will cause cacheline bouncing, but no
1135 * additional races since q_lock already protects against other CPUs.
1136 */
1137 put_nvmeq(nvmeq);
1138 if (length != (io.nblocks + 1) << ns->lba_shift)
1139 status = -ENOMEM;
1140 else
1141 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
1142
1143 nvme_unmap_user_pages(dev, io.opcode & 1, iod);
1144 nvme_free_iod(dev, iod);
1145 return status;
1146}
1147
1148static int nvme_user_admin_cmd(struct nvme_ns *ns,
1149 struct nvme_admin_cmd __user *ucmd)
1150{
1151 struct nvme_dev *dev = ns->dev;
1152 struct nvme_admin_cmd cmd;
1153 struct nvme_command c;
1154 int status, length;
1155 struct nvme_iod *iod;
1156
1157 if (!capable(CAP_SYS_ADMIN))
1158 return -EACCES;
1159 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1160 return -EFAULT;
1161
1162 memset(&c, 0, sizeof(c));
1163 c.common.opcode = cmd.opcode;
1164 c.common.flags = cmd.flags;
1165 c.common.nsid = cpu_to_le32(cmd.nsid);
1166 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1167 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1168 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
1169 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
1170 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
1171 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
1172 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
1173 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
1174
1175 length = cmd.data_len;
1176 if (cmd.data_len) {
1177 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
1178 length);
1179 if (IS_ERR(iod))
1180 return PTR_ERR(iod);
1181 length = nvme_setup_prps(dev, &c.common, iod, length,
1182 GFP_KERNEL);
1183 }
1184
1185 if (length != cmd.data_len)
1186 status = -ENOMEM;
1187 else
1188 status = nvme_submit_admin_cmd(dev, &c, NULL);
1189
1190 if (cmd.data_len) {
1191 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
1192 nvme_free_iod(dev, iod);
1193 }
1194 return status;
1195}
1196
1197static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1198 unsigned long arg)
1199{
1200 struct nvme_ns *ns = bdev->bd_disk->private_data;
1201
1202 switch (cmd) {
1203 case NVME_IOCTL_ID:
1204 return ns->ns_id;
1205 case NVME_IOCTL_ADMIN_CMD:
1206 return nvme_user_admin_cmd(ns, (void __user *)arg);
1207 case NVME_IOCTL_SUBMIT_IO:
1208 return nvme_submit_io(ns, (void __user *)arg);
1209 default:
1210 return -ENOTTY;
1211 }
1212}
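/*
 * A minimal user-space sketch of the I/O ioctl above, given the
 * nvme%dn%d naming used below (error handling omitted; a positive
 * return is an NVM Express status code, negative a Linux errno):
 *
 *	struct nvme_user_io io = {
 *		.opcode  = nvme_cmd_read,
 *		.addr    = (unsigned long)buf,	(must be dword aligned)
 *		.slba    = 0,
 *		.nblocks = 7,			(zero based: 8 blocks)
 *	};
 *	int fd = open("/dev/nvme0n1", O_RDWR);
 *	int status = ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
 */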
1213
1214static const struct block_device_operations nvme_fops = {
1215 .owner = THIS_MODULE,
1216 .ioctl = nvme_ioctl,
1217 .compat_ioctl = nvme_ioctl,
1218};
1219
1220static void nvme_timeout_ios(struct nvme_queue *nvmeq)
1221{
1222 int depth = nvmeq->q_depth - 1;
1223 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1224 unsigned long now = jiffies;
1225 int cmdid;
1226
1227 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
1228 void *ctx;
1229 nvme_completion_fn fn;
1230		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
		};
1231
1232 if (!time_after(now, info[cmdid].timeout))
1233 continue;
1234 dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
1235 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
1236 fn(nvmeq->dev, ctx, &cqe);
1237 }
1238}
1239
1240static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1241{
1242 while (bio_list_peek(&nvmeq->sq_cong)) {
1243 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1244 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
1245 if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
1246 bio_list_add_head(&nvmeq->sq_cong, bio);
1247 break;
1248 }
1249 if (bio_list_empty(&nvmeq->sq_cong))
1250 remove_wait_queue(&nvmeq->sq_full,
1251 &nvmeq->sq_cong_wait);
1252 }
1253}
1254
1255static int nvme_kthread(void *data)
1256{
1257 struct nvme_dev *dev;
1258
1259 while (!kthread_should_stop()) {
1260 __set_current_state(TASK_RUNNING);
1261 spin_lock(&dev_list_lock);
1262 list_for_each_entry(dev, &dev_list, node) {
1263 int i;
1264 for (i = 0; i < dev->queue_count; i++) {
1265 struct nvme_queue *nvmeq = dev->queues[i];
1266 if (!nvmeq)
1267 continue;
1268 spin_lock_irq(&nvmeq->q_lock);
1269 if (nvme_process_cq(nvmeq))
1270 printk("process_cq did something\n");
1271 nvme_timeout_ios(nvmeq);
1272 nvme_resubmit_bios(nvmeq);
1273 spin_unlock_irq(&nvmeq->q_lock);
1274 }
1275 }
1276 spin_unlock(&dev_list_lock);
1277 set_current_state(TASK_INTERRUPTIBLE);
1278 schedule_timeout(HZ);
1279 }
1280 return 0;
1281}
1282
1283static DEFINE_IDA(nvme_index_ida);
1284
1285static int nvme_get_ns_idx(void)
1286{
1287 int index, error;
1288
1289 do {
1290 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
1291 return -1;
1292
1293 spin_lock(&dev_list_lock);
1294 error = ida_get_new(&nvme_index_ida, &index);
1295 spin_unlock(&dev_list_lock);
1296 } while (error == -EAGAIN);
1297
1298 if (error)
1299 index = -1;
1300 return index;
1301}
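/*
 * This is the classic two-step IDA idiom that predates
 * ida_simple_get(): ida_pre_get() preallocates node memory with
 * GFP_KERNEL outside the lock, and ida_get_new() is retried under the
 * lock whenever a racing allocator consumed the preallocation
 * (-EAGAIN).
 */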
1302
1303static void nvme_put_ns_idx(int index)
1304{
1305 spin_lock(&dev_list_lock);
1306 ida_remove(&nvme_index_ida, index);
1307 spin_unlock(&dev_list_lock);
1308}
1309
1310static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
1311 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1312{
1313 struct nvme_ns *ns;
1314 struct gendisk *disk;
1315 int lbaf;
1316
1317 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1318 return NULL;
1319
1320 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1321 if (!ns)
1322 return NULL;
1323 ns->queue = blk_alloc_queue(GFP_KERNEL);
1324 if (!ns->queue)
1325 goto out_free_ns;
1326 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
1327 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1328 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1329/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
1330 blk_queue_make_request(ns->queue, nvme_make_request);
1331 ns->dev = dev;
1332 ns->queue->queuedata = ns;
1333
1334 disk = alloc_disk(NVME_MINORS);
1335 if (!disk)
1336 goto out_free_queue;
1337 ns->ns_id = nsid;
1338 ns->disk = disk;
1339 lbaf = id->flbas & 0xf;
1340 ns->lba_shift = id->lbaf[lbaf].ds;
1341
1342 disk->major = nvme_major;
1343 disk->minors = NVME_MINORS;
1344 disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
1345 disk->fops = &nvme_fops;
1346 disk->private_data = ns;
1347 disk->queue = ns->queue;
1348 disk->driverfs_dev = &dev->pci_dev->dev;
1349 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
1350 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1351
1352 return ns;
1353
1354 out_free_queue:
1355 blk_cleanup_queue(ns->queue);
1356 out_free_ns:
1357 kfree(ns);
1358 return NULL;
1359}
1360
1361static void nvme_ns_free(struct nvme_ns *ns)
1362{
1363 int index = ns->disk->first_minor / NVME_MINORS;
1364 put_disk(ns->disk);
1365 nvme_put_ns_idx(index);
1366 blk_cleanup_queue(ns->queue);
1367 kfree(ns);
1368}
1369
1370static int set_queue_count(struct nvme_dev *dev, int count)
1371{
1372 int status;
1373 u32 result;
1374 u32 q_count = (count - 1) | ((count - 1) << 16);
1375
1376 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
1377 &result);
1378 if (status)
1379 return -EIO;
1380 return min(result & 0xffff, result >> 16) + 1;
1381}
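/*
 * A worked example of the Number of Queues encoding above: asking for
 * count == 4 sends q_count = 3 | (3 << 16) (the feature uses
 * zero-based counts, submission queues in the low word and completion
 * queues in the high word).  If the controller grants 8 SQs and 4 CQs
 * it returns result = 0x00030007, and min(0x0007, 0x0003) + 1 = 4
 * usable I/O queues.
 */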
1382
1383static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
1384{
1385 int result, cpu, i, nr_io_queues, db_bar_size;
1386
1387 nr_io_queues = num_online_cpus();
1388 result = set_queue_count(dev, nr_io_queues);
1389 if (result < 0)
1390 return result;
1391 if (result < nr_io_queues)
1392 nr_io_queues = result;
1393
1394 /* Deregister the admin queue's interrupt */
1395 free_irq(dev->entry[0].vector, dev->queues[0]);
1396
1397 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
1398 if (db_bar_size > 8192) {
1399 iounmap(dev->bar);
1400 dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
1401 db_bar_size);
1402 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1403 dev->queues[0]->q_db = dev->dbs;
1404 }
1405
1406 for (i = 0; i < nr_io_queues; i++)
1407 dev->entry[i].entry = i;
1408 for (;;) {
1409 result = pci_enable_msix(dev->pci_dev, dev->entry,
1410 nr_io_queues);
1411 if (result == 0) {
1412 break;
1413 } else if (result > 0) {
1414 nr_io_queues = result;
1415 continue;
1416 } else {
1417 nr_io_queues = 1;
1418 break;
1419 }
1420 }
1421
1422 result = queue_request_irq(dev, dev->queues[0], "nvme admin");
1423 /* XXX: handle failure here */
1424
1425 cpu = cpumask_first(cpu_online_mask);
1426 for (i = 0; i < nr_io_queues; i++) {
1427 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
1428 cpu = cpumask_next(cpu, cpu_online_mask);
1429 }
1430
1431 for (i = 0; i < nr_io_queues; i++) {
1432 dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
1433 NVME_Q_DEPTH, i);
1434 if (IS_ERR(dev->queues[i + 1]))
1435 return PTR_ERR(dev->queues[i + 1]);
1436 dev->queue_count++;
1437 }
1438
1439 for (; i < num_possible_cpus(); i++) {
1440 int target = i % rounddown_pow_of_two(dev->queue_count - 1);
1441 dev->queues[i + 1] = dev->queues[target + 1];
1442 }
1443
1444 return 0;
1445}
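/*
 * The final loop above lets every possible CPU index a queue even
 * when fewer were granted: with 8 possible CPUs but only 3 I/O queues
 * created, rounddown_pow_of_two(3) == 2, so CPUs 3..7 alternate
 * between queues 1 and 2 (queues[0] stays the admin queue).  Masking
 * to a power of two keeps the per-CPU lookup a cheap modulo.
 */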
1446
1447static void nvme_free_queues(struct nvme_dev *dev)
1448{
1449 int i;
1450
1451 for (i = dev->queue_count - 1; i >= 0; i--)
1452 nvme_free_queue(dev, i);
1453}
1454
1455static int __devinit nvme_dev_add(struct nvme_dev *dev)
1456{
1457 int res, nn, i;
1458 struct nvme_ns *ns, *next;
1459 struct nvme_id_ctrl *ctrl;
1460 struct nvme_id_ns *id_ns;
1461 void *mem;
1462 dma_addr_t dma_addr;
1463
1464 res = nvme_setup_io_queues(dev);
1465 if (res)
1466 return res;
1467
1468	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
1469								GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
1470
1471 res = nvme_identify(dev, 0, 1, dma_addr);
1472 if (res) {
1473 res = -EIO;
1474 goto out_free;
1475 }
1476
1477 ctrl = mem;
1478 nn = le32_to_cpup(&ctrl->nn);
1479 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
1480 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
1481 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
1482
1483 id_ns = mem;
1484 for (i = 1; i <= nn; i++) {
1485 res = nvme_identify(dev, i, 0, dma_addr);
1486 if (res)
1487 continue;
1488
1489 if (id_ns->ncap == 0)
1490 continue;
1491
1492 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
1493 dma_addr + 4096);
1494 if (res)
1495 continue;
1496
1497 ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
1498 if (ns)
1499 list_add_tail(&ns->list, &dev->namespaces);
1500 }
1501 list_for_each_entry(ns, &dev->namespaces, list)
1502 add_disk(ns->disk);
1503
1504 goto out;
1505
1506 out_free:
1507 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1508 list_del(&ns->list);
1509 nvme_ns_free(ns);
1510 }
1511
1512 out:
1513 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
1514 return res;
1515}
1516
1517static int nvme_dev_remove(struct nvme_dev *dev)
1518{
1519 struct nvme_ns *ns, *next;
1520
1521 spin_lock(&dev_list_lock);
1522 list_del(&dev->node);
1523 spin_unlock(&dev_list_lock);
1524
1525	/* TODO: wait for all outstanding I/O to finish, or cancel it */
1526
1527 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1528 list_del(&ns->list);
1529 del_gendisk(ns->disk);
1530 nvme_ns_free(ns);
1531 }
1532
1533 nvme_free_queues(dev);
1534
1535 return 0;
1536}
1537
1538static int nvme_setup_prp_pools(struct nvme_dev *dev)
1539{
1540 struct device *dmadev = &dev->pci_dev->dev;
1541 dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
1542 PAGE_SIZE, PAGE_SIZE, 0);
1543 if (!dev->prp_page_pool)
1544 return -ENOMEM;
1545
1546 /* Optimisation for I/Os between 4k and 128k */
1547 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
1548 256, 256, 0);
1549 if (!dev->prp_small_pool) {
1550 dma_pool_destroy(dev->prp_page_pool);
1551 return -ENOMEM;
1552 }
1553 return 0;
1554}
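/*
 * Why 256 bytes for the small pool: 256 / 8 = 32 PRP entries covers
 * up to 32 pages beyond the first (about 128 KiB with 4 KiB pages),
 * which is exactly the nprps <= 256 / 8 threshold nvme_setup_prps()
 * tests before picking a pool, so mid-sized I/Os do not burn a whole
 * page per PRP list.
 */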
1555
1556static void nvme_release_prp_pools(struct nvme_dev *dev)
1557{
1558 dma_pool_destroy(dev->prp_page_pool);
1559 dma_pool_destroy(dev->prp_small_pool);
1560}
1561
1562/* XXX: Use an ida or something to let remove / add work correctly */
1563static void nvme_set_instance(struct nvme_dev *dev)
1564{
1565 static int instance;
1566 dev->instance = instance++;
1567}
1568
1569static void nvme_release_instance(struct nvme_dev *dev)
1570{
1571}
1572
1573static int __devinit nvme_probe(struct pci_dev *pdev,
1574 const struct pci_device_id *id)
1575{
1576 int bars, result = -ENOMEM;
1577 struct nvme_dev *dev;
1578
1579 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1580 if (!dev)
1581 return -ENOMEM;
1582 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
1583 GFP_KERNEL);
1584 if (!dev->entry)
1585 goto free;
1586 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
1587 GFP_KERNEL);
1588 if (!dev->queues)
1589 goto free;
1590
1591 if (pci_enable_device_mem(pdev))
1592 goto free;
1593 pci_set_master(pdev);
1594 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1595 if (pci_request_selected_regions(pdev, bars, "nvme"))
1596 goto disable;
1597
1598 INIT_LIST_HEAD(&dev->namespaces);
1599 dev->pci_dev = pdev;
1600 pci_set_drvdata(pdev, dev);
1601 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1602 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1603 nvme_set_instance(dev);
1604 dev->entry[0].vector = pdev->irq;
1605
1606 result = nvme_setup_prp_pools(dev);
1607 if (result)
1608 goto disable_msix;
1609
1610 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1611 if (!dev->bar) {
1612 result = -ENOMEM;
1613 goto disable_msix;
1614 }
1615
1616 result = nvme_configure_admin_queue(dev);
1617 if (result)
1618 goto unmap;
1619 dev->queue_count++;
1620
1621 spin_lock(&dev_list_lock);
1622 list_add(&dev->node, &dev_list);
1623 spin_unlock(&dev_list_lock);
1624
1625 result = nvme_dev_add(dev);
1626 if (result)
1627 goto delete;
1628
1629 return 0;
1630
1631 delete:
1632 spin_lock(&dev_list_lock);
1633 list_del(&dev->node);
1634 spin_unlock(&dev_list_lock);
1635
1636 nvme_free_queues(dev);
1637 unmap:
1638 iounmap(dev->bar);
1639 disable_msix:
1640 pci_disable_msix(pdev);
1641 nvme_release_instance(dev);
1642 nvme_release_prp_pools(dev);
1643 disable:
1644 pci_disable_device(pdev);
1645 pci_release_regions(pdev);
1646 free:
1647 kfree(dev->queues);
1648 kfree(dev->entry);
1649 kfree(dev);
1650 return result;
1651}
1652
1653static void __devexit nvme_remove(struct pci_dev *pdev)
1654{
1655 struct nvme_dev *dev = pci_get_drvdata(pdev);
1656 nvme_dev_remove(dev);
1657 pci_disable_msix(pdev);
1658 iounmap(dev->bar);
1659 nvme_release_instance(dev);
1660 nvme_release_prp_pools(dev);
1661 pci_disable_device(pdev);
1662 pci_release_regions(pdev);
1663 kfree(dev->queues);
1664 kfree(dev->entry);
1665 kfree(dev);
1666}
1667
1668/* These functions are yet to be implemented */
1669#define nvme_error_detected NULL
1670#define nvme_dump_registers NULL
1671#define nvme_link_reset NULL
1672#define nvme_slot_reset NULL
1673#define nvme_error_resume NULL
1674#define nvme_suspend NULL
1675#define nvme_resume NULL
1676
1677static struct pci_error_handlers nvme_err_handler = {
1678 .error_detected = nvme_error_detected,
1679 .mmio_enabled = nvme_dump_registers,
1680 .link_reset = nvme_link_reset,
1681 .slot_reset = nvme_slot_reset,
1682 .resume = nvme_error_resume,
1683};
1684
1685/* Move to pci_ids.h later */
1686#define PCI_CLASS_STORAGE_EXPRESS 0x010802
1687
1688static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
1689 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
1690 { 0, }
1691};
1692MODULE_DEVICE_TABLE(pci, nvme_id_table);
1693
1694static struct pci_driver nvme_driver = {
1695 .name = "nvme",
1696 .id_table = nvme_id_table,
1697 .probe = nvme_probe,
1698 .remove = __devexit_p(nvme_remove),
1699 .suspend = nvme_suspend,
1700 .resume = nvme_resume,
1701 .err_handler = &nvme_err_handler,
1702};
1703
1704static int __init nvme_init(void)
1705{
1706 int result = -EBUSY;
1707
1708 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
1709 if (IS_ERR(nvme_thread))
1710 return PTR_ERR(nvme_thread);
1711
1712 nvme_major = register_blkdev(nvme_major, "nvme");
1713 if (nvme_major <= 0)
1714 goto kill_kthread;
1715
1716 result = pci_register_driver(&nvme_driver);
1717 if (result)
1718 goto unregister_blkdev;
1719 return 0;
1720
1721 unregister_blkdev:
1722 unregister_blkdev(nvme_major, "nvme");
1723 kill_kthread:
1724 kthread_stop(nvme_thread);
1725 return result;
1726}
1727
1728static void __exit nvme_exit(void)
1729{
1730 pci_unregister_driver(&nvme_driver);
1731 unregister_blkdev(nvme_major, "nvme");
1732 kthread_stop(nvme_thread);
1733}
1734
1735MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
1736MODULE_LICENSE("GPL");
1737MODULE_VERSION("0.8");
1738module_init(nvme_init);
1739module_exit(nvme_exit);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6a8771f47a5..32362cf35b8 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -846,6 +846,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
846 846
847 do { 847 do {
848 rc = __tpm_pcr_read(chip, 0, digest); 848 rc = __tpm_pcr_read(chip, 0, digest);
849 if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
850 dev_info(chip->dev,
851 "TPM is disabled/deactivated (0x%X)\n", rc);
852 /* TPM is disabled and/or deactivated; driver can
853 * proceed and TPM does handle commands for
854 * suspend/resume correctly
855 */
856 return 0;
857 }
849 if (rc != TPM_WARN_DOING_SELFTEST) 858 if (rc != TPM_WARN_DOING_SELFTEST)
850 return rc; 859 return rc;
851 msleep(delay_msec); 860 msleep(delay_msec);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 8c1df302fbb..01054713828 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -39,6 +39,9 @@ enum tpm_addr {
39}; 39};
40 40
41#define TPM_WARN_DOING_SELFTEST 0x802 41#define TPM_WARN_DOING_SELFTEST 0x802
42#define TPM_ERR_DEACTIVATED 0x6
43#define TPM_ERR_DISABLED 0x7
44
42#define TPM_HEADER_SIZE 10 45#define TPM_HEADER_SIZE 10
43extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr, 46extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
44 char *); 47 char *);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5a99bb3f255..f1a274994bb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
124 124
125config MX3_IPU 125config MX3_IPU
126 bool "MX3x Image Processing Unit support" 126 bool "MX3x Image Processing Unit support"
127 depends on SOC_IMX31 || SOC_IMX35 127 depends on ARCH_MXC
128 select DMA_ENGINE 128 select DMA_ENGINE
129 default y 129 default y
130 help 130 help
@@ -187,6 +187,13 @@ config TIMB_DMA
187 help 187 help
188 Enable support for the Timberdale FPGA DMA engine. 188 Enable support for the Timberdale FPGA DMA engine.
189 189
190config SIRF_DMA
191 tristate "CSR SiRFprimaII DMA support"
192 depends on ARCH_PRIMA2
193 select DMA_ENGINE
194 help
195 Enable support for the CSR SiRFprimaII DMA engine.
196
190config ARCH_HAS_ASYNC_TX_FIND_CHANNEL 197config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
191 bool 198 bool
192 199
@@ -201,26 +208,26 @@ config PL330_DMA
201 platform_data for a dma-pl330 device. 208 platform_data for a dma-pl330 device.
202 209
203config PCH_DMA 210config PCH_DMA
204 tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support" 211 tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
205 depends on PCI && X86 212 depends on PCI && X86
206 select DMA_ENGINE 213 select DMA_ENGINE
207 help 214 help
208 Enable support for Intel EG20T PCH DMA engine. 215 Enable support for Intel EG20T PCH DMA engine.
209 216
210 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 217 This driver also can be used for LAPIS Semiconductor IOH(Input/
211 Output Hub), ML7213 and ML7223. 218 Output Hub), ML7213, ML7223 and ML7831.
212 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 219 ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
213 for MP(Media Phone) use. 220 for MP(Media Phone) use and ML7831 IOH is for general purpose use.
214 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 221 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
215 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 222 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
216 223
217config IMX_SDMA 224config IMX_SDMA
218 tristate "i.MX SDMA support" 225 tristate "i.MX SDMA support"
219 depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5 226 depends on ARCH_MXC
220 select DMA_ENGINE 227 select DMA_ENGINE
221 help 228 help
222 Support the i.MX SDMA engine. This engine is integrated into 229 Support the i.MX SDMA engine. This engine is integrated into
223 Freescale i.MX25/31/35/51 chips. 230 Freescale i.MX25/31/35/51/53 chips.
224 231
225config IMX_DMA 232config IMX_DMA
226 tristate "i.MX DMA support" 233 tristate "i.MX DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 30cf3b1f0c5..009a222e828 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
21obj-$(CONFIG_IMX_DMA) += imx-dma.o 21obj-$(CONFIG_IMX_DMA) += imx-dma.o
22obj-$(CONFIG_MXS_DMA) += mxs-dma.o 22obj-$(CONFIG_MXS_DMA) += mxs-dma.o
23obj-$(CONFIG_TIMB_DMA) += timb_dma.o 23obj-$(CONFIG_TIMB_DMA) += timb_dma.o
24obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
24obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 25obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
25obj-$(CONFIG_PL330_DMA) += pl330.o 26obj-$(CONFIG_PL330_DMA) += pl330.o
26obj-$(CONFIG_PCH_DMA) += pch_dma.o 27obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 0698695e8bf..8a281584458 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
854 int ret; 854 int ret;
855 855
856 /* Check if we already have a channel */ 856 /* Check if we already have a channel */
857 if (plchan->phychan) 857 if (plchan->phychan) {
858 return 0; 858 ch = plchan->phychan;
859 goto got_channel;
860 }
859 861
860 ch = pl08x_get_phy_channel(pl08x, plchan); 862 ch = pl08x_get_phy_channel(pl08x, plchan);
861 if (!ch) { 863 if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
880 return -EBUSY; 882 return -EBUSY;
881 } 883 }
882 ch->signal = ret; 884 ch->signal = ret;
883
884 /* Assign the flow control signal to this channel */
885 if (txd->direction == DMA_TO_DEVICE)
886 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
887 else if (txd->direction == DMA_FROM_DEVICE)
888 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
889 } 885 }
890 886
887 plchan->phychan = ch;
891 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", 888 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
892 ch->id, 889 ch->id,
893 ch->signal, 890 ch->signal,
894 plchan->name); 891 plchan->name);
895 892
893got_channel:
894 /* Assign the flow control signal to this channel */
895 if (txd->direction == DMA_MEM_TO_DEV)
896 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
897 else if (txd->direction == DMA_DEV_TO_MEM)
898 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
899
896 plchan->phychan_hold++; 900 plchan->phychan_hold++;
897 plchan->phychan = ch;
898 901
899 return 0; 902 return 0;
900} 903}
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1102 1105
1103 /* Transfer direction */ 1106 /* Transfer direction */
1104 plchan->runtime_direction = config->direction; 1107 plchan->runtime_direction = config->direction;
1105 if (config->direction == DMA_TO_DEVICE) { 1108 if (config->direction == DMA_MEM_TO_DEV) {
1106 addr_width = config->dst_addr_width; 1109 addr_width = config->dst_addr_width;
1107 maxburst = config->dst_maxburst; 1110 maxburst = config->dst_maxburst;
1108 } else if (config->direction == DMA_FROM_DEVICE) { 1111 } else if (config->direction == DMA_DEV_TO_MEM) {
1109 addr_width = config->src_addr_width; 1112 addr_width = config->src_addr_width;
1110 maxburst = config->src_maxburst; 1113 maxburst = config->src_maxburst;
1111 } else { 1114 } else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1136 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1139 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1137 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1140 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1138 1141
1139 if (plchan->runtime_direction == DMA_FROM_DEVICE) { 1142 if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
1140 plchan->src_addr = config->src_addr; 1143 plchan->src_addr = config->src_addr;
1141 plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | 1144 plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
1142 pl08x_select_bus(plchan->cd->periph_buses, 1145 pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1152 "configured channel %s (%s) for %s, data width %d, " 1155 "configured channel %s (%s) for %s, data width %d, "
1153 "maxburst %d words, LE, CCTL=0x%08x\n", 1156 "maxburst %d words, LE, CCTL=0x%08x\n",
1154 dma_chan_name(chan), plchan->name, 1157 dma_chan_name(chan), plchan->name,
1155 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 1158 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
1156 addr_width, 1159 addr_width,
1157 maxburst, 1160 maxburst,
1158 cctl); 1161 cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1322 1325
1323static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1326static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1324 struct dma_chan *chan, struct scatterlist *sgl, 1327 struct dma_chan *chan, struct scatterlist *sgl,
1325 unsigned int sg_len, enum dma_data_direction direction, 1328 unsigned int sg_len, enum dma_transfer_direction direction,
1326 unsigned long flags) 1329 unsigned long flags)
1327{ 1330{
1328 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1331 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1354 */ 1357 */
1355 txd->direction = direction; 1358 txd->direction = direction;
1356 1359
1357 if (direction == DMA_TO_DEVICE) { 1360 if (direction == DMA_MEM_TO_DEV) {
1358 txd->cctl = plchan->dst_cctl; 1361 txd->cctl = plchan->dst_cctl;
1359 slave_addr = plchan->dst_addr; 1362 slave_addr = plchan->dst_addr;
1360 } else if (direction == DMA_FROM_DEVICE) { 1363 } else if (direction == DMA_DEV_TO_MEM) {
1361 txd->cctl = plchan->src_cctl; 1364 txd->cctl = plchan->src_cctl;
1362 slave_addr = plchan->src_addr; 1365 slave_addr = plchan->src_addr;
1363 } else { 1366 } else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1368 } 1371 }
1369 1372
1370 if (plchan->cd->device_fc) 1373 if (plchan->cd->device_fc)
1371 tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER : 1374 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
1372 PL080_FLOW_PER2MEM_PER; 1375 PL080_FLOW_PER2MEM_PER;
1373 else 1376 else
1374 tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER : 1377 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
1375 PL080_FLOW_PER2MEM; 1378 PL080_FLOW_PER2MEM;
1376 1379
1377 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1380 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1387 list_add_tail(&dsg->node, &txd->dsg_list); 1390 list_add_tail(&dsg->node, &txd->dsg_list);
1388 1391
1389 dsg->len = sg_dma_len(sg); 1392 dsg->len = sg_dma_len(sg);
1390 if (direction == DMA_TO_DEVICE) { 1393 if (direction == DMA_MEM_TO_DEV) {
1391 dsg->src_addr = sg_phys(sg); 1394 dsg->src_addr = sg_phys(sg);
1392 dsg->dst_addr = slave_addr; 1395 dsg->dst_addr = slave_addr;
1393 } else { 1396 } else {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fcfa0a8b5c5..97f87b29b9f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -23,6 +23,8 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/of.h>
27#include <linux/of_device.h>
26 28
27#include "at_hdmac_regs.h" 29#include "at_hdmac_regs.h"
28 30
@@ -660,7 +662,7 @@ err_desc_get:
660 */ 662 */
661static struct dma_async_tx_descriptor * 663static struct dma_async_tx_descriptor *
662atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 664atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
663 unsigned int sg_len, enum dma_data_direction direction, 665 unsigned int sg_len, enum dma_transfer_direction direction,
664 unsigned long flags) 666 unsigned long flags)
665{ 667{
666 struct at_dma_chan *atchan = to_at_dma_chan(chan); 668 struct at_dma_chan *atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
678 680
679 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", 681 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
680 sg_len, 682 sg_len,
681 direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", 683 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
682 flags); 684 flags);
683 685
684 if (unlikely(!atslave || !sg_len)) { 686 if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
692 ctrlb = ATC_IEN; 694 ctrlb = ATC_IEN;
693 695
694 switch (direction) { 696 switch (direction) {
695 case DMA_TO_DEVICE: 697 case DMA_MEM_TO_DEV:
696 ctrla |= ATC_DST_WIDTH(reg_width); 698 ctrla |= ATC_DST_WIDTH(reg_width);
697 ctrlb |= ATC_DST_ADDR_MODE_FIXED 699 ctrlb |= ATC_DST_ADDR_MODE_FIXED
698 | ATC_SRC_ADDR_MODE_INCR 700 | ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
725 total_len += len; 727 total_len += len;
726 } 728 }
727 break; 729 break;
728 case DMA_FROM_DEVICE: 730 case DMA_DEV_TO_MEM:
729 ctrla |= ATC_SRC_WIDTH(reg_width); 731 ctrla |= ATC_SRC_WIDTH(reg_width);
730 ctrlb |= ATC_DST_ADDR_MODE_INCR 732 ctrlb |= ATC_DST_ADDR_MODE_INCR
731 | ATC_SRC_ADDR_MODE_FIXED 733 | ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ err_desc_get:
787 */ 789 */
788static int 790static int
789atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, 791atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
790 size_t period_len, enum dma_data_direction direction) 792 size_t period_len, enum dma_transfer_direction direction)
791{ 793{
792 if (period_len > (ATC_BTSIZE_MAX << reg_width)) 794 if (period_len > (ATC_BTSIZE_MAX << reg_width))
793 goto err_out; 795 goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
795 goto err_out; 797 goto err_out;
796 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 798 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
797 goto err_out; 799 goto err_out;
798 if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) 800 if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
799 goto err_out; 801 goto err_out;
800 802
801 return 0; 803 return 0;
@@ -810,7 +812,7 @@ err_out:
810static int 812static int
811atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, 813atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
812 unsigned int period_index, dma_addr_t buf_addr, 814 unsigned int period_index, dma_addr_t buf_addr,
813 size_t period_len, enum dma_data_direction direction) 815 size_t period_len, enum dma_transfer_direction direction)
814{ 816{
815 u32 ctrla; 817 u32 ctrla;
816 unsigned int reg_width = atslave->reg_width; 818 unsigned int reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
822 | period_len >> reg_width; 824 | period_len >> reg_width;
823 825
824 switch (direction) { 826 switch (direction) {
825 case DMA_TO_DEVICE: 827 case DMA_MEM_TO_DEV:
826 desc->lli.saddr = buf_addr + (period_len * period_index); 828 desc->lli.saddr = buf_addr + (period_len * period_index);
827 desc->lli.daddr = atslave->tx_reg; 829 desc->lli.daddr = atslave->tx_reg;
828 desc->lli.ctrla = ctrla; 830 desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
833 | ATC_DIF(AT_DMA_PER_IF); 835 | ATC_DIF(AT_DMA_PER_IF);
834 break; 836 break;
835 837
836 case DMA_FROM_DEVICE: 838 case DMA_DEV_TO_MEM:
837 desc->lli.saddr = atslave->rx_reg; 839 desc->lli.saddr = atslave->rx_reg;
838 desc->lli.daddr = buf_addr + (period_len * period_index); 840 desc->lli.daddr = buf_addr + (period_len * period_index);
839 desc->lli.ctrla = ctrla; 841 desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
861 */ 863 */
862static struct dma_async_tx_descriptor * 864static struct dma_async_tx_descriptor *
863atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 865atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
864 size_t period_len, enum dma_data_direction direction) 866 size_t period_len, enum dma_transfer_direction direction)
865{ 867{
866 struct at_dma_chan *atchan = to_at_dma_chan(chan); 868 struct at_dma_chan *atchan = to_at_dma_chan(chan);
867 struct at_dma_slave *atslave = chan->private; 869 struct at_dma_slave *atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
872 unsigned int i; 874 unsigned int i;
873 875
874 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 876 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
875 direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", 877 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
876 buf_addr, 878 buf_addr,
877 periods, buf_len, period_len); 879 periods, buf_len, period_len);
878 880
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
1175 1177
1176/*-- Module Management -----------------------------------------------*/ 1178/*-- Module Management -----------------------------------------------*/
1177 1179
1180/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
1181static struct at_dma_platform_data at91sam9rl_config = {
1182 .nr_channels = 2,
1183};
1184static struct at_dma_platform_data at91sam9g45_config = {
1185 .nr_channels = 8,
1186};
1187
1188#if defined(CONFIG_OF)
1189static const struct of_device_id atmel_dma_dt_ids[] = {
1190 {
1191 .compatible = "atmel,at91sam9rl-dma",
1192 .data = &at91sam9rl_config,
1193 }, {
1194 .compatible = "atmel,at91sam9g45-dma",
1195 .data = &at91sam9g45_config,
1196 }, {
1197 /* sentinel */
1198 }
1199};
1200
1201MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1202#endif
1203
1204static const struct platform_device_id atdma_devtypes[] = {
1205 {
1206 .name = "at91sam9rl_dma",
1207 .driver_data = (unsigned long) &at91sam9rl_config,
1208 }, {
1209 .name = "at91sam9g45_dma",
1210 .driver_data = (unsigned long) &at91sam9g45_config,
1211 }, {
1212 /* sentinel */
1213 }
1214};
1215
1216static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
1217 struct platform_device *pdev)
1218{
1219 if (pdev->dev.of_node) {
1220 const struct of_device_id *match;
1221 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1222 if (match == NULL)
1223 return NULL;
1224 return match->data;
1225 }
1226 return (struct at_dma_platform_data *)
1227 platform_get_device_id(pdev)->driver_data;
1228}
1229
1178/** 1230/**
1179 * at_dma_off - disable DMA controller 1231 * at_dma_off - disable DMA controller
1180 * @atdma: the Atmel HDAMC device 1232 * @atdma: the Atmel HDAMC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
1193 1245
1194static int __init at_dma_probe(struct platform_device *pdev) 1246static int __init at_dma_probe(struct platform_device *pdev)
1195{ 1247{
1196 struct at_dma_platform_data *pdata;
1197 struct resource *io; 1248 struct resource *io;
1198 struct at_dma *atdma; 1249 struct at_dma *atdma;
1199 size_t size; 1250 size_t size;
1200 int irq; 1251 int irq;
1201 int err; 1252 int err;
1202 int i; 1253 int i;
1254 struct at_dma_platform_data *plat_dat;
1203 1255
1204 /* get DMA Controller parameters from platform */ 1256 /* setup platform data for each SoC */
1205 pdata = pdev->dev.platform_data; 1257 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1206 if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS) 1258 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1207 return -EINVAL; 1259 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1260
1261 /* get DMA parameters from controller type */
1262 plat_dat = at_dma_get_driver_data(pdev);
1263 if (!plat_dat)
1264 return -ENODEV;
1208 1265
1209 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1266 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1210 if (!io) 1267 if (!io)
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
1215 return irq; 1272 return irq;
1216 1273
1217 size = sizeof(struct at_dma); 1274 size = sizeof(struct at_dma);
1218 size += pdata->nr_channels * sizeof(struct at_dma_chan); 1275 size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1219 atdma = kzalloc(size, GFP_KERNEL); 1276 atdma = kzalloc(size, GFP_KERNEL);
1220 if (!atdma) 1277 if (!atdma)
1221 return -ENOMEM; 1278 return -ENOMEM;
1222 1279
1223 /* discover transaction capabilites from the platform data */ 1280 /* discover transaction capabilities */
1224 atdma->dma_common.cap_mask = pdata->cap_mask; 1281 atdma->dma_common.cap_mask = plat_dat->cap_mask;
1225 atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; 1282 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1226 1283
1227 size = resource_size(io); 1284 size = resource_size(io);
1228 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { 1285 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1268 1325
1269 /* initialize channels related values */ 1326 /* initialize channels related values */
1270 INIT_LIST_HEAD(&atdma->dma_common.channels); 1327 INIT_LIST_HEAD(&atdma->dma_common.channels);
1271 for (i = 0; i < pdata->nr_channels; i++) { 1328 for (i = 0; i < plat_dat->nr_channels; i++) {
1272 struct at_dma_chan *atchan = &atdma->chan[i]; 1329 struct at_dma_chan *atchan = &atdma->chan[i];
1273 1330
1274 atchan->chan_common.device = &atdma->dma_common; 1331 atchan->chan_common.device = &atdma->dma_common;
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1313 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 1370 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1314 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 1371 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1315 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", 1372 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1316 pdata->nr_channels); 1373 plat_dat->nr_channels);
1317 1374
1318 dma_async_device_register(&atdma->dma_common); 1375 dma_async_device_register(&atdma->dma_common);
1319 1376
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
1495static struct platform_driver at_dma_driver = { 1552static struct platform_driver at_dma_driver = {
1496 .remove = __exit_p(at_dma_remove), 1553 .remove = __exit_p(at_dma_remove),
1497 .shutdown = at_dma_shutdown, 1554 .shutdown = at_dma_shutdown,
1555 .id_table = atdma_devtypes,
1498 .driver = { 1556 .driver = {
1499 .name = "at_hdmac", 1557 .name = "at_hdmac",
1500 .pm = &at_dma_dev_pm_ops, 1558 .pm = &at_dma_dev_pm_ops,
1559 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
1501 }, 1560 },
1502}; 1561};
1503 1562
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index aa4c9aebab7..dcaedfc181c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
251/** 251/**
252 * struct at_dma - internal representation of an Atmel HDMA Controller 252 * struct at_dma - internal representation of an Atmel HDMA Controller
253 * @chan_common: common dmaengine dma_device object members 253 * @chan_common: common dmaengine dma_device object members
254 * @atdma_devtype: identifier of DMA controller compatibility
254 * @ch_regs: memory mapped register base 255 * @ch_regs: memory mapped register base
255 * @clk: dma controller clock 256 * @clk: dma controller clock
256 * @save_imr: interrupt mask register that is saved on suspend/resume cycle 257 * @save_imr: interrupt mask register that is saved on suspend/resume cycle
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4234f416ef1..d65a718c0f9 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,7 @@ struct coh901318_desc {
39 struct scatterlist *sg; 39 struct scatterlist *sg;
40 unsigned int sg_len; 40 unsigned int sg_len;
41 struct coh901318_lli *lli; 41 struct coh901318_lli *lli;
42 enum dma_data_direction dir; 42 enum dma_transfer_direction dir;
43 unsigned long flags; 43 unsigned long flags;
44 u32 head_config; 44 u32 head_config;
45 u32 head_ctrl; 45 u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1034 1034
1035static struct dma_async_tx_descriptor * 1035static struct dma_async_tx_descriptor *
1036coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 1036coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1037 unsigned int sg_len, enum dma_data_direction direction, 1037 unsigned int sg_len, enum dma_transfer_direction direction,
1038 unsigned long flags) 1038 unsigned long flags)
1039{ 1039{
1040 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1040 struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1077 ctrl_last |= cohc->runtime_ctrl; 1077 ctrl_last |= cohc->runtime_ctrl;
1078 ctrl |= cohc->runtime_ctrl; 1078 ctrl |= cohc->runtime_ctrl;
1079 1079
1080 if (direction == DMA_TO_DEVICE) { 1080 if (direction == DMA_MEM_TO_DEV) {
1081 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | 1081 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
1082 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; 1082 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
1083 1083
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1085 ctrl_chained |= tx_flags; 1085 ctrl_chained |= tx_flags;
1086 ctrl_last |= tx_flags; 1086 ctrl_last |= tx_flags;
1087 ctrl |= tx_flags; 1087 ctrl |= tx_flags;
1088 } else if (direction == DMA_FROM_DEVICE) { 1088 } else if (direction == DMA_DEV_TO_MEM) {
1089 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | 1089 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
1090 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; 1090 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
1091 1091
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
1274 int i = 0; 1274 int i = 0;
1275 1275
1276 /* We only support mem to per or per to mem transfers */ 1276 /* We only support mem to per or per to mem transfers */
1277 if (config->direction == DMA_FROM_DEVICE) { 1277 if (config->direction == DMA_DEV_TO_MEM) {
1278 addr = config->src_addr; 1278 addr = config->src_addr;
1279 addr_width = config->src_addr_width; 1279 addr_width = config->src_addr_width;
1280 maxburst = config->src_maxburst; 1280 maxburst = config->src_maxburst;
1281 } else if (config->direction == DMA_TO_DEVICE) { 1281 } else if (config->direction == DMA_MEM_TO_DEV) {
1282 addr = config->dst_addr; 1282 addr = config->dst_addr;
1283 addr_width = config->dst_addr_width; 1283 addr_width = config->dst_addr_width;
1284 maxburst = config->dst_maxburst; 1284 maxburst = config->dst_maxburst;
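The coh901318 hunks belong to the same tree-wide rename seen above: slave and cyclic APIs move from enum dma_data_direction, a DMA-mapping concept, to the dmaengine-specific enum dma_transfer_direction. The sketch below reconstructs the new enum as it is believed to read in linux/dmaengine.h at the time of this series; treat it as illustrative rather than authoritative.

/* Sketch of the replacement enum and the mechanical mapping. */
enum dma_transfer_direction {
        DMA_MEM_TO_MEM,
        DMA_MEM_TO_DEV,         /* replaces DMA_TO_DEVICE in these APIs */
        DMA_DEV_TO_MEM,         /* replaces DMA_FROM_DEVICE */
        DMA_DEV_TO_DEV,
        DMA_TRANS_NONE,
};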
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 9f7e0e6a7ee..6c0e2d4c668 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -7,11 +7,10 @@
7 * Author: Per Friden <per.friden@stericsson.com> 7 * Author: Per Friden <per.friden@stericsson.com>
8 */ 8 */
9 9
10#include <linux/dma-mapping.h>
11#include <linux/spinlock.h> 10#include <linux/spinlock.h>
12#include <linux/dmapool.h>
13#include <linux/memory.h> 11#include <linux/memory.h>
14#include <linux/gfp.h> 12#include <linux/gfp.h>
13#include <linux/dmapool.h>
15#include <mach/coh901318.h> 14#include <mach/coh901318.h>
16 15
17#include "coh901318_lli.h" 16#include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
177 struct coh901318_lli *lli, 176 struct coh901318_lli *lli,
178 dma_addr_t buf, unsigned int size, 177 dma_addr_t buf, unsigned int size,
179 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom, 178 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
180 enum dma_data_direction dir) 179 enum dma_transfer_direction dir)
181{ 180{
182 int s = size; 181 int s = size;
183 dma_addr_t src; 182 dma_addr_t src;
184 dma_addr_t dst; 183 dma_addr_t dst;
185 184
186 185
187 if (dir == DMA_TO_DEVICE) { 186 if (dir == DMA_MEM_TO_DEV) {
188 src = buf; 187 src = buf;
189 dst = dev_addr; 188 dst = dev_addr;
190 189
191 } else if (dir == DMA_FROM_DEVICE) { 190 } else if (dir == DMA_DEV_TO_MEM) {
192 191
193 src = dev_addr; 192 src = dev_addr;
194 dst = buf; 193 dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
215 214
216 lli = coh901318_lli_next(lli); 215 lli = coh901318_lli_next(lli);
217 216
218 if (dir == DMA_TO_DEVICE) 217 if (dir == DMA_MEM_TO_DEV)
219 src += block_size; 218 src += block_size;
220 else if (dir == DMA_FROM_DEVICE) 219 else if (dir == DMA_DEV_TO_MEM)
221 dst += block_size; 220 dst += block_size;
222 } 221 }
223 222
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
234 struct scatterlist *sgl, unsigned int nents, 233 struct scatterlist *sgl, unsigned int nents,
235 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl, 234 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
236 u32 ctrl_last, 235 u32 ctrl_last,
237 enum dma_data_direction dir, u32 ctrl_irq_mask) 236 enum dma_transfer_direction dir, u32 ctrl_irq_mask)
238{ 237{
239 int i; 238 int i;
240 struct scatterlist *sg; 239 struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
249 248
250 spin_lock(&pool->lock); 249 spin_lock(&pool->lock);
251 250
252 if (dir == DMA_TO_DEVICE) 251 if (dir == DMA_MEM_TO_DEV)
253 dst = dev_addr; 252 dst = dev_addr;
254 else if (dir == DMA_FROM_DEVICE) 253 else if (dir == DMA_DEV_TO_MEM)
255 src = dev_addr; 254 src = dev_addr;
256 else 255 else
257 goto err; 256 goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
269 ctrl_sg = ctrl ? ctrl : ctrl_last; 268 ctrl_sg = ctrl ? ctrl : ctrl_last;
270 269
271 270
272 if (dir == DMA_TO_DEVICE) 271 if (dir == DMA_MEM_TO_DEV)
273 /* increment source address */ 272 /* increment source address */
274 src = sg_phys(sg); 273 src = sg_phys(sg);
275 else 274 else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
293 lli->src_addr = src; 292 lli->src_addr = src;
294 lli->dst_addr = dst; 293 lli->dst_addr = dst;
295 294
296 if (dir == DMA_FROM_DEVICE) 295 if (dir == DMA_DEV_TO_MEM)
297 dst += elem_size; 296 dst += elem_size;
298 else 297 else
299 src += elem_size; 298 src += elem_size;
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
index 7a5c80990e9..abff3714fdd 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318_lli.h
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
97 struct coh901318_lli *lli, 97 struct coh901318_lli *lli,
98 dma_addr_t buf, unsigned int size, 98 dma_addr_t buf, unsigned int size,
99 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last, 99 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
100 enum dma_data_direction dir); 100 enum dma_transfer_direction dir);
101 101
102/** 102/**
 103 * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer 103 * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
119 struct scatterlist *sg, unsigned int nents, 119 struct scatterlist *sg, unsigned int nents,
120 dma_addr_t dev_addr, u32 ctrl_chained, 120 dma_addr_t dev_addr, u32 ctrl_chained,
121 u32 ctrl, u32 ctrl_last, 121 u32 ctrl, u32 ctrl_last,
122 enum dma_data_direction dir, u32 ctrl_irq_mask); 122 enum dma_transfer_direction dir, u32 ctrl_irq_mask);
123 123
124#endif /* COH901318_LLI_H */ 124#endif /* COH901318_LLI_H */
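Drivers consume the new direction values mostly in their DMA_SLAVE_CONFIG handlers, selecting the peripheral-side address and width by direction, exactly as the coh901318 and ep93xx hunks do. A condensed sketch of that idiom, with hypothetical foo_ naming:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Sketch: pick peripheral-side parameters from a slave config. */
static int foo_slave_config(const struct dma_slave_config *cfg,
                            dma_addr_t *addr,
                            enum dma_slave_buswidth *width)
{
        if (cfg->direction == DMA_DEV_TO_MEM) {         /* rx: device is source */
                *addr  = cfg->src_addr;
                *width = cfg->src_addr_width;
        } else if (cfg->direction == DMA_MEM_TO_DEV) {  /* tx: device is sink */
                *addr  = cfg->dst_addr;
                *width = cfg->dst_addr_width;
        } else {
                return -EINVAL;                         /* slave transfers only */
        }
        return 0;
}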
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b499d..a6c6051ec85 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
693 !device->device_prep_dma_interrupt); 693 !device->device_prep_dma_interrupt);
694 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && 694 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
695 !device->device_prep_dma_sg); 695 !device->device_prep_dma_sg);
696 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
697 !device->device_prep_slave_sg);
698 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && 696 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
699 !device->device_prep_dma_cyclic); 697 !device->device_prep_dma_cyclic);
700 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 698 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
701 !device->device_control); 699 !device->device_control);
700 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
701 !device->device_prep_interleaved_dma);
702 702
703 BUG_ON(!device->device_alloc_chan_resources); 703 BUG_ON(!device->device_alloc_chan_resources);
704 BUG_ON(!device->device_free_chan_resources); 704 BUG_ON(!device->device_free_chan_resources);
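The dma_async_device_register() hunk reshuffles the registration-time sanity checks: DMA_SLAVE no longer demands device_prep_slave_sg but still demands device_control, and the new DMA_INTERLEAVE capability demands device_prep_interleaved_dma. A sketch of the minimum wiring a slave-capable driver needs so these checks pass; the foo_* callbacks are assumed to be defined elsewhere:

/* Sketch: advertise a capability only with its callbacks in place. */
static int foo_register(struct dma_device *dd)
{
        dma_cap_set(DMA_SLAVE, dd->cap_mask);           /* advertise capability */
        dd->device_prep_slave_sg        = foo_prep_slave_sg;
        dd->device_control              = foo_control;  /* required for SLAVE */
        dd->device_alloc_chan_resources = foo_alloc_chan_resources;
        dd->device_free_chan_resources  = foo_free_chan_resources;
        dd->device_tx_status            = foo_tx_status;
        dd->device_issue_pending        = foo_issue_pending;
        return dma_async_device_register(dd);
}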
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9bfd6d36071..9b592b02b5f 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
166 return cookie; 166 return cookie;
167} 167}
168 168
169static void dwc_initialize(struct dw_dma_chan *dwc)
170{
171 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
172 struct dw_dma_slave *dws = dwc->chan.private;
173 u32 cfghi = DWC_CFGH_FIFO_MODE;
174 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
175
176 if (dwc->initialized == true)
177 return;
178
179 if (dws) {
180 /*
181 * We need controller-specific data to set up slave
182 * transfers.
183 */
184 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
185
186 cfghi = dws->cfg_hi;
187 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
188 }
189
190 channel_writel(dwc, CFG_LO, cfglo);
191 channel_writel(dwc, CFG_HI, cfghi);
192
193 /* Enable interrupts */
194 channel_set_bit(dw, MASK.XFER, dwc->mask);
195 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
196 channel_set_bit(dw, MASK.ERROR, dwc->mask);
197
198 dwc->initialized = true;
199}
200
169/*----------------------------------------------------------------------*/ 201/*----------------------------------------------------------------------*/
170 202
171/* Called with dwc->lock held and bh disabled */ 203/* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
189 return; 221 return;
190 } 222 }
191 223
224 dwc_initialize(dwc);
225
192 channel_writel(dwc, LLP, first->txd.phys); 226 channel_writel(dwc, LLP, first->txd.phys);
193 channel_writel(dwc, CTL_LO, 227 channel_writel(dwc, CTL_LO,
194 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 228 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ err_desc_get:
696 730
697static struct dma_async_tx_descriptor * 731static struct dma_async_tx_descriptor *
698dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 732dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
699 unsigned int sg_len, enum dma_data_direction direction, 733 unsigned int sg_len, enum dma_transfer_direction direction,
700 unsigned long flags) 734 unsigned long flags)
701{ 735{
702 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 736 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
720 prev = first = NULL; 754 prev = first = NULL;
721 755
722 switch (direction) { 756 switch (direction) {
723 case DMA_TO_DEVICE: 757 case DMA_MEM_TO_DEV:
724 ctllo = (DWC_DEFAULT_CTLLO(chan->private) 758 ctllo = (DWC_DEFAULT_CTLLO(chan->private)
725 | DWC_CTLL_DST_WIDTH(reg_width) 759 | DWC_CTLL_DST_WIDTH(reg_width)
726 | DWC_CTLL_DST_FIX 760 | DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
777 goto slave_sg_todev_fill_desc; 811 goto slave_sg_todev_fill_desc;
778 } 812 }
779 break; 813 break;
780 case DMA_FROM_DEVICE: 814 case DMA_DEV_TO_MEM:
781 ctllo = (DWC_DEFAULT_CTLLO(chan->private) 815 ctllo = (DWC_DEFAULT_CTLLO(chan->private)
782 | DWC_CTLL_SRC_WIDTH(reg_width) 816 | DWC_CTLL_SRC_WIDTH(reg_width)
783 | DWC_CTLL_DST_INC 817 | DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
959 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 993 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
960 struct dw_dma *dw = to_dw_dma(chan->device); 994 struct dw_dma *dw = to_dw_dma(chan->device);
961 struct dw_desc *desc; 995 struct dw_desc *desc;
962 struct dw_dma_slave *dws;
963 int i; 996 int i;
964 u32 cfghi;
965 u32 cfglo;
966 unsigned long flags; 997 unsigned long flags;
967 998
968 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 999 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
975 1006
976 dwc->completed = chan->cookie = 1; 1007 dwc->completed = chan->cookie = 1;
977 1008
978 cfghi = DWC_CFGH_FIFO_MODE;
979 cfglo = 0;
980
981 dws = chan->private;
982 if (dws) {
983 /*
984 * We need controller-specific data to set up slave
985 * transfers.
986 */
987 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
988
989 cfghi = dws->cfg_hi;
990 cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
991 }
992
993 cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
994
995 channel_writel(dwc, CFG_LO, cfglo);
996 channel_writel(dwc, CFG_HI, cfghi);
997
998 /* 1009 /*
999 * NOTE: some controllers may have additional features that we 1010 * NOTE: some controllers may have additional features that we
1000 * need to initialize here, like "scatter-gather" (which 1011 * need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
1026 i = ++dwc->descs_allocated; 1037 i = ++dwc->descs_allocated;
1027 } 1038 }
1028 1039
1029 /* Enable interrupts */
1030 channel_set_bit(dw, MASK.XFER, dwc->mask);
1031 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1032 channel_set_bit(dw, MASK.ERROR, dwc->mask);
1033
1034 spin_unlock_irqrestore(&dwc->lock, flags); 1040 spin_unlock_irqrestore(&dwc->lock, flags);
1035 1041
1036 dev_dbg(chan2dev(chan), 1042 dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1058 spin_lock_irqsave(&dwc->lock, flags); 1064 spin_lock_irqsave(&dwc->lock, flags);
1059 list_splice_init(&dwc->free_list, &list); 1065 list_splice_init(&dwc->free_list, &list);
1060 dwc->descs_allocated = 0; 1066 dwc->descs_allocated = 0;
1067 dwc->initialized = false;
1061 1068
1062 /* Disable interrupts */ 1069 /* Disable interrupts */
1063 channel_clear_bit(dw, MASK.XFER, dwc->mask); 1070 channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
1165 */ 1172 */
1166struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 1173struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1167 dma_addr_t buf_addr, size_t buf_len, size_t period_len, 1174 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1168 enum dma_data_direction direction) 1175 enum dma_transfer_direction direction)
1169{ 1176{
1170 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1177 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1171 struct dw_cyclic_desc *cdesc; 1178 struct dw_cyclic_desc *cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1206 goto out_err; 1213 goto out_err;
1207 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 1214 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1208 goto out_err; 1215 goto out_err;
1209 if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) 1216 if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
1210 goto out_err; 1217 goto out_err;
1211 1218
1212 retval = ERR_PTR(-ENOMEM); 1219 retval = ERR_PTR(-ENOMEM);
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1228 goto out_err_desc_get; 1235 goto out_err_desc_get;
1229 1236
1230 switch (direction) { 1237 switch (direction) {
1231 case DMA_TO_DEVICE: 1238 case DMA_MEM_TO_DEV:
1232 desc->lli.dar = dws->tx_reg; 1239 desc->lli.dar = dws->tx_reg;
1233 desc->lli.sar = buf_addr + (period_len * i); 1240 desc->lli.sar = buf_addr + (period_len * i);
1234 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) 1241 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1239 | DWC_CTLL_FC(dws->fc) 1246 | DWC_CTLL_FC(dws->fc)
1240 | DWC_CTLL_INT_EN); 1247 | DWC_CTLL_INT_EN);
1241 break; 1248 break;
1242 case DMA_FROM_DEVICE: 1249 case DMA_DEV_TO_MEM:
1243 desc->lli.dar = buf_addr + (period_len * i); 1250 desc->lli.dar = buf_addr + (period_len * i);
1244 desc->lli.sar = dws->rx_reg; 1251 desc->lli.sar = dws->rx_reg;
1245 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) 1252 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
1335 1342
1336static void dw_dma_off(struct dw_dma *dw) 1343static void dw_dma_off(struct dw_dma *dw)
1337{ 1344{
1345 int i;
1346
1338 dma_writel(dw, CFG, 0); 1347 dma_writel(dw, CFG, 0);
1339 1348
1340 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1349 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
1345 1354
1346 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) 1355 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1347 cpu_relax(); 1356 cpu_relax();
1357
1358 for (i = 0; i < dw->dma.chancnt; i++)
1359 dw->chan[i].initialized = false;
1348} 1360}
1349 1361
1350static int __init dw_probe(struct platform_device *pdev) 1362static int __init dw_probe(struct platform_device *pdev)
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
1533 1545
1534 dw_dma_off(platform_get_drvdata(pdev)); 1546 dw_dma_off(platform_get_drvdata(pdev));
1535 clk_disable(dw->clk); 1547 clk_disable(dw->clk);
1548
1536 return 0; 1549 return 0;
1537} 1550}
1538 1551
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index c3419518d70..5eef6946a36 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -140,6 +140,7 @@ struct dw_dma_chan {
140 u8 mask; 140 u8 mask;
141 u8 priority; 141 u8 priority;
142 bool paused; 142 bool paused;
143 bool initialized;
143 144
144 spinlock_t lock; 145 spinlock_t lock;
145 146
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index b47e2b803fa..59e7a965772 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
246static struct ep93xx_dma_desc * 246static struct ep93xx_dma_desc *
247ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) 247ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
248{ 248{
249 if (list_empty(&edmac->active))
250 return NULL;
251
249 return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); 252 return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
250} 253}
251 254
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
263 */ 266 */
264static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) 267static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
265{ 268{
269 struct ep93xx_dma_desc *desc;
270
266 list_rotate_left(&edmac->active); 271 list_rotate_left(&edmac->active);
267 272
268 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) 273 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
269 return true; 274 return true;
270 275
276 desc = ep93xx_dma_get_active(edmac);
277 if (!desc)
278 return false;
279
271 /* 280 /*
272 * If txd.cookie is set it means that we are back in the first 281 * If txd.cookie is set it means that we are back in the first
273 * descriptor in the chain and hence done with it. 282 * descriptor in the chain and hence done with it.
274 */ 283 */
275 return !ep93xx_dma_get_active(edmac)->txd.cookie; 284 return !desc->txd.cookie;
276} 285}
277 286
278/* 287/*
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
327 336
328static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) 337static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
329{ 338{
330 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); 339 struct ep93xx_dma_desc *desc;
331 u32 bus_addr; 340 u32 bus_addr;
332 341
333 if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) 342 desc = ep93xx_dma_get_active(edmac);
343 if (!desc) {
344 dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
345 return;
346 }
347
348 if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
334 bus_addr = desc->src_addr; 349 bus_addr = desc->src_addr;
335 else 350 else
336 bus_addr = desc->dst_addr; 351 bus_addr = desc->dst_addr;
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
443 control = (5 << M2M_CONTROL_PWSC_SHIFT); 458 control = (5 << M2M_CONTROL_PWSC_SHIFT);
444 control |= M2M_CONTROL_NO_HDSK; 459 control |= M2M_CONTROL_NO_HDSK;
445 460
446 if (data->direction == DMA_TO_DEVICE) { 461 if (data->direction == DMA_MEM_TO_DEV) {
447 control |= M2M_CONTROL_DAH; 462 control |= M2M_CONTROL_DAH;
448 control |= M2M_CONTROL_TM_TX; 463 control |= M2M_CONTROL_TM_TX;
449 control |= M2M_CONTROL_RSS_SSPTX; 464 control |= M2M_CONTROL_RSS_SSPTX;
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
459 * This IDE part is totally untested. Values below are taken 474 * This IDE part is totally untested. Values below are taken
 460 * from the EP93xx User's Guide and might not be correct. 475 * from the EP93xx User's Guide and might not be correct.
461 */ 476 */
462 control |= M2M_CONTROL_NO_HDSK; 477 if (data->direction == DMA_MEM_TO_DEV) {
463 control |= M2M_CONTROL_RSS_IDE;
464 control |= M2M_CONTROL_PW_16;
465
466 if (data->direction == DMA_TO_DEVICE) {
467 /* Worst case from the UG */ 478 /* Worst case from the UG */
468 control = (3 << M2M_CONTROL_PWSC_SHIFT); 479 control = (3 << M2M_CONTROL_PWSC_SHIFT);
469 control |= M2M_CONTROL_DAH; 480 control |= M2M_CONTROL_DAH;
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
473 control |= M2M_CONTROL_SAH; 484 control |= M2M_CONTROL_SAH;
474 control |= M2M_CONTROL_TM_RX; 485 control |= M2M_CONTROL_TM_RX;
475 } 486 }
487
488 control |= M2M_CONTROL_NO_HDSK;
489 control |= M2M_CONTROL_RSS_IDE;
490 control |= M2M_CONTROL_PW_16;
476 break; 491 break;
477 492
478 default: 493 default:
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
491 506
492static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) 507static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
493{ 508{
494 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); 509 struct ep93xx_dma_desc *desc;
510
511 desc = ep93xx_dma_get_active(edmac);
512 if (!desc) {
513 dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
514 return;
515 }
495 516
496 if (edmac->buffer == 0) { 517 if (edmac->buffer == 0) {
497 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); 518 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
669{ 690{
670 struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; 691 struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
671 struct ep93xx_dma_desc *desc, *d; 692 struct ep93xx_dma_desc *desc, *d;
672 dma_async_tx_callback callback; 693 dma_async_tx_callback callback = NULL;
673 void *callback_param; 694 void *callback_param = NULL;
674 LIST_HEAD(list); 695 LIST_HEAD(list);
675 696
676 spin_lock_irq(&edmac->lock); 697 spin_lock_irq(&edmac->lock);
698 /*
699 * If dma_terminate_all() was called before we get to run, the active
700 * list has become empty. If that happens we aren't supposed to do
701 * anything more than call ep93xx_dma_advance_work().
702 */
677 desc = ep93xx_dma_get_active(edmac); 703 desc = ep93xx_dma_get_active(edmac);
678 if (desc->complete) { 704 if (desc) {
679 edmac->last_completed = desc->txd.cookie; 705 if (desc->complete) {
680 list_splice_init(&edmac->active, &list); 706 edmac->last_completed = desc->txd.cookie;
707 list_splice_init(&edmac->active, &list);
708 }
709 callback = desc->txd.callback;
710 callback_param = desc->txd.callback_param;
681 } 711 }
682 spin_unlock_irq(&edmac->lock); 712 spin_unlock_irq(&edmac->lock);
683 713
684 /* Pick up the next descriptor from the queue */ 714 /* Pick up the next descriptor from the queue */
685 ep93xx_dma_advance_work(edmac); 715 ep93xx_dma_advance_work(edmac);
686 716
687 callback = desc->txd.callback;
688 callback_param = desc->txd.callback_param;
689
690 /* Now we can release all the chained descriptors */ 717 /* Now we can release all the chained descriptors */
691 list_for_each_entry_safe(desc, d, &list, node) { 718 list_for_each_entry_safe(desc, d, &list, node) {
692 /* 719 /*
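Since terminate_all can empty the active list before the tasklet runs, the reworked tasklet must tolerate a NULL active descriptor and must grab the callback pointer while the descriptor is still known to be valid, under the lock. The idiom, condensed into a generic sketch with placeholder foo_* types:

#include <linux/dmaengine.h>
#include <linux/spinlock.h>

struct foo_desc {
        struct dma_async_tx_descriptor txd;
};

struct foo_chan {
        spinlock_t lock;
        /* ... active list ... */
};

static struct foo_desc *foo_get_active(struct foo_chan *fc);  /* may be NULL */

/* Sketch: snapshot callback data under the lock, invoke it lock-free. */
static void foo_tasklet(struct foo_chan *fc)
{
        dma_async_tx_callback callback = NULL;
        void *callback_param = NULL;
        struct foo_desc *desc;

        spin_lock_irq(&fc->lock);
        desc = foo_get_active(fc);      /* NULL after terminate_all */
        if (desc) {
                callback       = desc->txd.callback;
                callback_param = desc->txd.callback_param;
        }
        spin_unlock_irq(&fc->lock);

        if (callback)                   /* never call back with the lock held */
                callback(callback_param);
}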
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
706static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) 733static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
707{ 734{
708 struct ep93xx_dma_chan *edmac = dev_id; 735 struct ep93xx_dma_chan *edmac = dev_id;
736 struct ep93xx_dma_desc *desc;
709 irqreturn_t ret = IRQ_HANDLED; 737 irqreturn_t ret = IRQ_HANDLED;
710 738
711 spin_lock(&edmac->lock); 739 spin_lock(&edmac->lock);
712 740
741 desc = ep93xx_dma_get_active(edmac);
742 if (!desc) {
743 dev_warn(chan2dev(edmac),
744 "got interrupt while active list is empty\n");
745 spin_unlock(&edmac->lock);
746 return IRQ_NONE;
747 }
748
713 switch (edmac->edma->hw_interrupt(edmac)) { 749 switch (edmac->edma->hw_interrupt(edmac)) {
714 case INTERRUPT_DONE: 750 case INTERRUPT_DONE:
715 ep93xx_dma_get_active(edmac)->complete = true; 751 desc->complete = true;
716 tasklet_schedule(&edmac->tasklet); 752 tasklet_schedule(&edmac->tasklet);
717 break; 753 break;
718 754
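Returning IRQ_NONE when the interrupt arrives with an empty active list is deliberate: it tells the IRQ core that this handler did not service the event, which feeds the kernel's spurious-interrupt accounting instead of silently consuming it. The guard, using the same placeholder names as the previous sketch:

/* Sketch: disown interrupts that arrive with nothing in flight. */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
        struct foo_chan *fc = dev_id;

        spin_lock(&fc->lock);
        if (!foo_get_active(fc)) {
                spin_unlock(&fc->lock);
                return IRQ_NONE;        /* counts toward spurious-IRQ detection */
        }
        /* ... acknowledge hardware, mark the descriptor complete ... */
        spin_unlock(&fc->lock);
        return IRQ_HANDLED;
}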
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
803 switch (data->port) { 839 switch (data->port) {
804 case EP93XX_DMA_SSP: 840 case EP93XX_DMA_SSP:
805 case EP93XX_DMA_IDE: 841 case EP93XX_DMA_IDE:
806 if (data->direction != DMA_TO_DEVICE && 842 if (data->direction != DMA_MEM_TO_DEV &&
807 data->direction != DMA_FROM_DEVICE) 843 data->direction != DMA_DEV_TO_MEM)
808 return -EINVAL; 844 return -EINVAL;
809 break; 845 break;
810 default: 846 default:
@@ -952,7 +988,7 @@ fail:
952 */ 988 */
953static struct dma_async_tx_descriptor * 989static struct dma_async_tx_descriptor *
954ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 990ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
955 unsigned int sg_len, enum dma_data_direction dir, 991 unsigned int sg_len, enum dma_transfer_direction dir,
956 unsigned long flags) 992 unsigned long flags)
957{ 993{
958 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 994 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
988 goto fail; 1024 goto fail;
989 } 1025 }
990 1026
991 if (dir == DMA_TO_DEVICE) { 1027 if (dir == DMA_MEM_TO_DEV) {
992 desc->src_addr = sg_dma_address(sg); 1028 desc->src_addr = sg_dma_address(sg);
993 desc->dst_addr = edmac->runtime_addr; 1029 desc->dst_addr = edmac->runtime_addr;
994 } else { 1030 } else {
@@ -1032,7 +1068,7 @@ fail:
1032static struct dma_async_tx_descriptor * 1068static struct dma_async_tx_descriptor *
1033ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 1069ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1034 size_t buf_len, size_t period_len, 1070 size_t buf_len, size_t period_len,
1035 enum dma_data_direction dir) 1071 enum dma_transfer_direction dir)
1036{ 1072{
1037 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 1073 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1038 struct ep93xx_dma_desc *desc, *first; 1074 struct ep93xx_dma_desc *desc, *first;
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1065 goto fail; 1101 goto fail;
1066 } 1102 }
1067 1103
1068 if (dir == DMA_TO_DEVICE) { 1104 if (dir == DMA_MEM_TO_DEV) {
1069 desc->src_addr = dma_addr + offset; 1105 desc->src_addr = dma_addr + offset;
1070 desc->dst_addr = edmac->runtime_addr; 1106 desc->dst_addr = edmac->runtime_addr;
1071 } else { 1107 } else {
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
1133 return -EINVAL; 1169 return -EINVAL;
1134 1170
1135 switch (config->direction) { 1171 switch (config->direction) {
1136 case DMA_FROM_DEVICE: 1172 case DMA_DEV_TO_MEM:
1137 width = config->src_addr_width; 1173 width = config->src_addr_width;
1138 addr = config->src_addr; 1174 addr = config->src_addr;
1139 break; 1175 break;
1140 1176
1141 case DMA_TO_DEVICE: 1177 case DMA_MEM_TO_DEV:
1142 width = config->dst_addr_width; 1178 width = config->dst_addr_width;
1143 addr = config->dst_addr; 1179 addr = config->dst_addr;
1144 break; 1180 break;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 8a781540590..b98070c33ca 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -772,7 +772,7 @@ fail:
772 */ 772 */
773static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( 773static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
774 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 774 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
775 enum dma_data_direction direction, unsigned long flags) 775 enum dma_transfer_direction direction, unsigned long flags)
776{ 776{
777 /* 777 /*
778 * This operation is not supported on the Freescale DMA controller 778 * This operation is not supported on the Freescale DMA controller
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
819 return -ENXIO; 819 return -ENXIO;
820 820
821 /* we set the controller burst size depending on direction */ 821 /* we set the controller burst size depending on direction */
822 if (config->direction == DMA_TO_DEVICE) 822 if (config->direction == DMA_MEM_TO_DEV)
823 size = config->dst_addr_width * config->dst_maxburst; 823 size = config->dst_addr_width * config->dst_maxburst;
824 else 824 else
825 size = config->src_addr_width * config->src_maxburst; 825 size = config->src_addr_width * config->src_maxburst;
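The burst size programmed here is in bytes: the address width gives bytes per beat and maxburst gives beats per burst. A worked sketch with hypothetical values:

#include <linux/dmaengine.h>

static size_t foo_burst_bytes(const struct dma_slave_config *cfg)
{
        /* e.g. DMA_SLAVE_BUSWIDTH_4_BYTES (= 4 bytes/beat) with a
         * dst_maxburst of 16 beats gives 4 * 16 = 64 bytes per burst */
        return cfg->dst_addr_width * cfg->dst_maxburst;
}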
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 4be55f9bb6c..e4383ee2c9a 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
107 imx_dma_disable(imxdmac->imxdma_channel); 107 imx_dma_disable(imxdmac->imxdma_channel);
108 return 0; 108 return 0;
109 case DMA_SLAVE_CONFIG: 109 case DMA_SLAVE_CONFIG:
110 if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { 110 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
111 imxdmac->per_address = dmaengine_cfg->src_addr; 111 imxdmac->per_address = dmaengine_cfg->src_addr;
112 imxdmac->watermark_level = dmaengine_cfg->src_maxburst; 112 imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
113 imxdmac->word_size = dmaengine_cfg->src_addr_width; 113 imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
224 224
225static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( 225static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
226 struct dma_chan *chan, struct scatterlist *sgl, 226 struct dma_chan *chan, struct scatterlist *sgl,
227 unsigned int sg_len, enum dma_data_direction direction, 227 unsigned int sg_len, enum dma_transfer_direction direction,
228 unsigned long flags) 228 unsigned long flags)
229{ 229{
230 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 230 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
241 dma_length += sg->length; 241 dma_length += sg->length;
242 } 242 }
243 243
244 if (direction == DMA_FROM_DEVICE) 244 if (direction == DMA_DEV_TO_MEM)
245 dmamode = DMA_MODE_READ; 245 dmamode = DMA_MODE_READ;
246 else 246 else
247 dmamode = DMA_MODE_WRITE; 247 dmamode = DMA_MODE_WRITE;
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
271 271
272static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( 272static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
273 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 273 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
274 size_t period_len, enum dma_data_direction direction) 274 size_t period_len, enum dma_transfer_direction direction)
275{ 275{
276 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 276 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
277 struct imxdma_engine *imxdma = imxdmac->imxdma; 277 struct imxdma_engine *imxdma = imxdmac->imxdma;
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
317 imxdmac->sg_list[periods].page_link = 317 imxdmac->sg_list[periods].page_link =
318 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; 318 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
319 319
320 if (direction == DMA_FROM_DEVICE) 320 if (direction == DMA_DEV_TO_MEM)
321 dmamode = DMA_MODE_READ; 321 dmamode = DMA_MODE_READ;
322 else 322 else
323 dmamode = DMA_MODE_WRITE; 323 dmamode = DMA_MODE_WRITE;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f993955a640..a8af379680c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -247,7 +247,7 @@ struct sdma_engine;
247struct sdma_channel { 247struct sdma_channel {
248 struct sdma_engine *sdma; 248 struct sdma_engine *sdma;
249 unsigned int channel; 249 unsigned int channel;
250 enum dma_data_direction direction; 250 enum dma_transfer_direction direction;
251 enum sdma_peripheral_type peripheral_type; 251 enum sdma_peripheral_type peripheral_type;
252 unsigned int event_id0; 252 unsigned int event_id0;
253 unsigned int event_id1; 253 unsigned int event_id1;
@@ -268,6 +268,8 @@ struct sdma_channel {
268 struct dma_async_tx_descriptor desc; 268 struct dma_async_tx_descriptor desc;
269 dma_cookie_t last_completed; 269 dma_cookie_t last_completed;
270 enum dma_status status; 270 enum dma_status status;
271 unsigned int chn_count;
272 unsigned int chn_real_count;
271}; 273};
272 274
273#define IMX_DMA_SG_LOOP (1 << 0) 275#define IMX_DMA_SG_LOOP (1 << 0)
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
503 struct sdma_buffer_descriptor *bd; 505 struct sdma_buffer_descriptor *bd;
504 int i, error = 0; 506 int i, error = 0;
505 507
508 sdmac->chn_real_count = 0;
506 /* 509 /*
507 * non loop mode. Iterate over all descriptors, collect 510 * non loop mode. Iterate over all descriptors, collect
508 * errors and call callback function 511 * errors and call callback function
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
512 515
513 if (bd->mode.status & (BD_DONE | BD_RROR)) 516 if (bd->mode.status & (BD_DONE | BD_RROR))
514 error = -EIO; 517 error = -EIO;
518 sdmac->chn_real_count += bd->mode.count;
515 } 519 }
516 520
517 if (error) 521 if (error)
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
519 else 523 else
520 sdmac->status = DMA_SUCCESS; 524 sdmac->status = DMA_SUCCESS;
521 525
526 sdmac->last_completed = sdmac->desc.cookie;
522 if (sdmac->desc.callback) 527 if (sdmac->desc.callback)
523 sdmac->desc.callback(sdmac->desc.callback_param); 528 sdmac->desc.callback(sdmac->desc.callback_param);
524 sdmac->last_completed = sdmac->desc.cookie;
525} 529}
526 530
527static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) 531static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
650 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 654 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
651 int ret; 655 int ret;
652 656
653 if (sdmac->direction == DMA_FROM_DEVICE) { 657 if (sdmac->direction == DMA_DEV_TO_MEM) {
654 load_address = sdmac->pc_from_device; 658 load_address = sdmac->pc_from_device;
655 } else { 659 } else {
656 load_address = sdmac->pc_to_device; 660 load_address = sdmac->pc_to_device;
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
832 836
833static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) 837static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
834{ 838{
839 unsigned long flags;
835 struct sdma_channel *sdmac = to_sdma_chan(tx->chan); 840 struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
836 struct sdma_engine *sdma = sdmac->sdma; 841 struct sdma_engine *sdma = sdmac->sdma;
837 dma_cookie_t cookie; 842 dma_cookie_t cookie;
838 843
839 spin_lock_irq(&sdmac->lock); 844 spin_lock_irqsave(&sdmac->lock, flags);
840 845
841 cookie = sdma_assign_cookie(sdmac); 846 cookie = sdma_assign_cookie(sdmac);
842 847
843 sdma_enable_channel(sdma, sdmac->channel); 848 sdma_enable_channel(sdma, sdmac->channel);
844 849
845 spin_unlock_irq(&sdmac->lock); 850 spin_unlock_irqrestore(&sdmac->lock, flags);
846 851
847 return cookie; 852 return cookie;
848} 853}
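The sdma_tx_submit() switch from spin_lock_irq() to spin_lock_irqsave() matters because submit can be reached from contexts that already run with interrupts disabled; the _irq variant would unconditionally re-enable them at unlock. A generic sketch of the distinction:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);       /* placeholder lock */

static void foo_submit_path(void)
{
        unsigned long flags;

        spin_lock_irqsave(&foo_lock, flags);      /* records current IRQ state */
        /* ... assign the cookie, kick the channel ... */
        spin_unlock_irqrestore(&foo_lock, flags); /* restores it exactly */

        /* spin_unlock_irq() here would force interrupts back on, breaking
         * any caller that entered with them disabled */
}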
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
911 916
912static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 917static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
913 struct dma_chan *chan, struct scatterlist *sgl, 918 struct dma_chan *chan, struct scatterlist *sgl,
914 unsigned int sg_len, enum dma_data_direction direction, 919 unsigned int sg_len, enum dma_transfer_direction direction,
915 unsigned long flags) 920 unsigned long flags)
916{ 921{
917 struct sdma_channel *sdmac = to_sdma_chan(chan); 922 struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
941 goto err_out; 946 goto err_out;
942 } 947 }
943 948
949 sdmac->chn_count = 0;
944 for_each_sg(sgl, sg, sg_len, i) { 950 for_each_sg(sgl, sg, sg_len, i) {
945 struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 951 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
946 int param; 952 int param;
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
957 } 963 }
958 964
959 bd->mode.count = count; 965 bd->mode.count = count;
966 sdmac->chn_count += count;
960 967
961 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { 968 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
962 ret = -EINVAL; 969 ret = -EINVAL;
@@ -1008,7 +1015,7 @@ err_out:
1008 1015
1009static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 1016static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1010 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1017 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1011 size_t period_len, enum dma_data_direction direction) 1018 size_t period_len, enum dma_transfer_direction direction)
1012{ 1019{
1013 struct sdma_channel *sdmac = to_sdma_chan(chan); 1020 struct sdma_channel *sdmac = to_sdma_chan(chan);
1014 struct sdma_engine *sdma = sdmac->sdma; 1021 struct sdma_engine *sdma = sdmac->sdma;
@@ -1093,7 +1100,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1093 sdma_disable_channel(sdmac); 1100 sdma_disable_channel(sdmac);
1094 return 0; 1101 return 0;
1095 case DMA_SLAVE_CONFIG: 1102 case DMA_SLAVE_CONFIG:
1096 if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { 1103 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1097 sdmac->per_address = dmaengine_cfg->src_addr; 1104 sdmac->per_address = dmaengine_cfg->src_addr;
1098 sdmac->watermark_level = dmaengine_cfg->src_maxburst; 1105 sdmac->watermark_level = dmaengine_cfg->src_maxburst;
1099 sdmac->word_size = dmaengine_cfg->src_addr_width; 1106 sdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -1102,6 +1109,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1102 sdmac->watermark_level = dmaengine_cfg->dst_maxburst; 1109 sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
1103 sdmac->word_size = dmaengine_cfg->dst_addr_width; 1110 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1104 } 1111 }
1112 sdmac->direction = dmaengine_cfg->direction;
1105 return sdma_config_channel(sdmac); 1113 return sdma_config_channel(sdmac);
1106 default: 1114 default:
1107 return -ENOSYS; 1115 return -ENOSYS;
@@ -1119,7 +1127,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1119 1127
1120 last_used = chan->cookie; 1128 last_used = chan->cookie;
1121 1129
1122 dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); 1130 dma_set_tx_state(txstate, sdmac->last_completed, last_used,
1131 sdmac->chn_count - sdmac->chn_real_count);
1123 1132
1124 return sdmac->status; 1133 return sdmac->status;
1125} 1134}
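Tracking chn_count (bytes requested) against chn_real_count (bytes the hardware actually moved) lets tx_status report a real residue instead of 0. From the client side, assuming the stock dmaengine_tx_status() helper, the payoff looks like this sketch:

#include <linux/dmaengine.h>

/* Sketch: read the residue that this change makes meaningful. */
static u32 foo_bytes_pending(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;

        dmaengine_tx_status(chan, cookie, &state);
        return state.residue;   /* == chn_count - chn_real_count here */
}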
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 19a0c64d45d..74f70aadf9e 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
280 * callbacks but must be called with the lock held. 280 * callbacks but must be called with the lock held.
281 */ 281 */
282static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, 282static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
283 struct intel_mid_dma_desc *desc) 283 struct intel_mid_dma_desc *desc)
284 __releases(&midc->lock) __acquires(&midc->lock)
284{ 285{
285 struct dma_async_tx_descriptor *txd = &desc->txd; 286 struct dma_async_tx_descriptor *txd = &desc->txd;
286 dma_async_tx_callback callback_txd = NULL; 287 dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
311 pci_pool_free(desc->lli_pool, desc->lli, 312 pci_pool_free(desc->lli_pool, desc->lli,
312 desc->lli_phys); 313 desc->lli_phys);
313 pci_pool_destroy(desc->lli_pool); 314 pci_pool_destroy(desc->lli_pool);
315 desc->lli = NULL;
314 } 316 }
315 list_move(&desc->desc_node, &midc->free_list); 317 list_move(&desc->desc_node, &midc->free_list);
316 midc->busy = false; 318 midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
395 midc->dma->block_size); 397 midc->dma->block_size);
396 /*Populate SAR and DAR values*/ 398 /*Populate SAR and DAR values*/
397 sg_phy_addr = sg_phys(sg); 399 sg_phy_addr = sg_phys(sg);
398 if (desc->dirn == DMA_TO_DEVICE) { 400 if (desc->dirn == DMA_MEM_TO_DEV) {
399 lli_bloc_desc->sar = sg_phy_addr; 401 lli_bloc_desc->sar = sg_phy_addr;
400 lli_bloc_desc->dar = mids->dma_slave.dst_addr; 402 lli_bloc_desc->dar = mids->dma_slave.dst_addr;
401 } else if (desc->dirn == DMA_FROM_DEVICE) { 403 } else if (desc->dirn == DMA_DEV_TO_MEM) {
402 lli_bloc_desc->sar = mids->dma_slave.src_addr; 404 lli_bloc_desc->sar = mids->dma_slave.src_addr;
403 lli_bloc_desc->dar = sg_phy_addr; 405 lli_bloc_desc->dar = sg_phy_addr;
404 } 406 }
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
490 492
491 ret = dma_async_is_complete(cookie, last_complete, last_used); 493 ret = dma_async_is_complete(cookie, last_complete, last_used);
492 if (ret != DMA_SUCCESS) { 494 if (ret != DMA_SUCCESS) {
495 spin_lock_bh(&midc->lock);
493 midc_scan_descriptors(to_middma_device(chan->device), midc); 496 midc_scan_descriptors(to_middma_device(chan->device), midc);
497 spin_unlock_bh(&midc->lock);
494 498
495 last_complete = midc->completed; 499 last_complete = midc->completed;
496 last_used = chan->cookie; 500 last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
566 pci_pool_free(desc->lli_pool, desc->lli, 570 pci_pool_free(desc->lli_pool, desc->lli,
567 desc->lli_phys); 571 desc->lli_phys);
568 pci_pool_destroy(desc->lli_pool); 572 pci_pool_destroy(desc->lli_pool);
573 desc->lli = NULL;
569 } 574 }
570 list_move(&desc->desc_node, &midc->free_list); 575 list_move(&desc->desc_node, &midc->free_list);
571 } 576 }
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
632 if (midc->dma->pimr_mask) { 637 if (midc->dma->pimr_mask) {
633 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 638 cfg_hi.cfgx.protctl = 0x0; /*default value*/
634 cfg_hi.cfgx.fifo_mode = 1; 639 cfg_hi.cfgx.fifo_mode = 1;
635 if (mids->dma_slave.direction == DMA_TO_DEVICE) { 640 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
636 cfg_hi.cfgx.src_per = 0; 641 cfg_hi.cfgx.src_per = 0;
637 if (mids->device_instance == 0) 642 if (mids->device_instance == 0)
638 cfg_hi.cfgx.dst_per = 3; 643 cfg_hi.cfgx.dst_per = 3;
639 if (mids->device_instance == 1) 644 if (mids->device_instance == 1)
640 cfg_hi.cfgx.dst_per = 1; 645 cfg_hi.cfgx.dst_per = 1;
641 } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 646 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
642 if (mids->device_instance == 0) 647 if (mids->device_instance == 0)
643 cfg_hi.cfgx.src_per = 2; 648 cfg_hi.cfgx.src_per = 2;
644 if (mids->device_instance == 1) 649 if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
682 ctl_lo.ctlx.sinc = 0; 687 ctl_lo.ctlx.sinc = 0;
683 ctl_lo.ctlx.dinc = 0; 688 ctl_lo.ctlx.dinc = 0;
684 } else { 689 } else {
685 if (mids->dma_slave.direction == DMA_TO_DEVICE) { 690 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
686 ctl_lo.ctlx.sinc = 0; 691 ctl_lo.ctlx.sinc = 0;
687 ctl_lo.ctlx.dinc = 2; 692 ctl_lo.ctlx.dinc = 2;
688 ctl_lo.ctlx.tt_fc = 1; 693 ctl_lo.ctlx.tt_fc = 1;
689 } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 694 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
690 ctl_lo.ctlx.sinc = 2; 695 ctl_lo.ctlx.sinc = 2;
691 ctl_lo.ctlx.dinc = 0; 696 ctl_lo.ctlx.dinc = 0;
692 ctl_lo.ctlx.tt_fc = 2; 697 ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ err_desc_get:
732 */ 737 */
733static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 738static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
734 struct dma_chan *chan, struct scatterlist *sgl, 739 struct dma_chan *chan, struct scatterlist *sgl,
735 unsigned int sg_len, enum dma_data_direction direction, 740 unsigned int sg_len, enum dma_transfer_direction direction,
736 unsigned long flags) 741 unsigned long flags)
737{ 742{
738 struct intel_mid_dma_chan *midc = NULL; 743 struct intel_mid_dma_chan *midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
868 pm_runtime_get_sync(&mid->pdev->dev); 873 pm_runtime_get_sync(&mid->pdev->dev);
869 874
870 if (mid->state == SUSPENDED) { 875 if (mid->state == SUSPENDED) {
871 if (dma_resume(mid->pdev)) { 876 if (dma_resume(&mid->pdev->dev)) {
872 pr_err("ERR_MDMA: resume failed"); 877 pr_err("ERR_MDMA: resume failed");
873 return -EFAULT; 878 return -EFAULT;
874 } 879 }
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
1099 LNW_PERIPHRAL_MASK_SIZE); 1104 LNW_PERIPHRAL_MASK_SIZE);
1100 if (dma->mask_reg == NULL) { 1105 if (dma->mask_reg == NULL) {
 1101 pr_err("ERR_MDMA:Can't map peripheral intr space !!\n"); 1106 pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
1102 return -ENOMEM; 1107 err = -ENOMEM;
1108 goto err_ioremap;
1103 } 1109 }
1104 } else 1110 } else
1105 dma->mask_reg = NULL; 1111 dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
1196err_engine: 1202err_engine:
1197 free_irq(pdev->irq, dma); 1203 free_irq(pdev->irq, dma);
1198err_irq: 1204err_irq:
1205 if (dma->mask_reg)
1206 iounmap(dma->mask_reg);
1207err_ioremap:
1199 pci_pool_destroy(dma->dma_pool); 1208 pci_pool_destroy(dma->dma_pool);
1200err_dma_pool: 1209err_dma_pool:
1201 pr_err("ERR_MDMA:setup_dma failed: %d\n", err); 1210 pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
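The new err_ioremap label slots into the kernel's usual reverse-order goto unwind, where each failure jumps to the label that releases exactly what has been set up so far. The skeleton of the pattern, with hypothetical foo_* helpers:

#define FOO_REG_BASE 0xf0000000UL       /* hypothetical */
#define FOO_REG_SIZE 0x1000UL

struct foo_dev {
        void __iomem *regs;
        struct foo_pool *pool;          /* hypothetical pool type */
};

static int foo_setup(struct foo_dev *fd)
{
        int err;

        fd->pool = foo_pool_create();           /* hypothetical helper */
        if (!fd->pool)
                return -ENOMEM;

        fd->regs = ioremap(FOO_REG_BASE, FOO_REG_SIZE);
        if (!fd->regs) {
                err = -ENOMEM;
                goto err_ioremap;               /* pool exists, map doesn't */
        }

        err = foo_engine_start(fd);             /* hypothetical */
        if (err)
                goto err_engine;

        return 0;

err_engine:
        iounmap(fd->regs);                      /* undo in exact reverse order */
err_ioremap:
        foo_pool_destroy(fd->pool);
        return err;
}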
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
1337* 1346*
1338* This function is called by OS when a power event occurs 1347* This function is called by OS when a power event occurs
1339*/ 1348*/
1340int dma_suspend(struct pci_dev *pci, pm_message_t state) 1349static int dma_suspend(struct device *dev)
1341{ 1350{
1351 struct pci_dev *pci = to_pci_dev(dev);
1342 int i; 1352 int i;
1343 struct middma_device *device = pci_get_drvdata(pci); 1353 struct middma_device *device = pci_get_drvdata(pci);
1344 pr_debug("MDMA: dma_suspend called\n"); 1354 pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
1362* 1372*
1363* This function is called by OS when a power event occurs 1373* This function is called by OS when a power event occurs
1364*/ 1374*/
1365int dma_resume(struct pci_dev *pci) 1375int dma_resume(struct device *dev)
1366{ 1376{
1377 struct pci_dev *pci = to_pci_dev(dev);
1367 int ret; 1378 int ret;
1368 struct middma_device *device = pci_get_drvdata(pci); 1379 struct middma_device *device = pci_get_drvdata(pci);
1369 1380
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
1429 .runtime_suspend = dma_runtime_suspend, 1440 .runtime_suspend = dma_runtime_suspend,
1430 .runtime_resume = dma_runtime_resume, 1441 .runtime_resume = dma_runtime_resume,
1431 .runtime_idle = dma_runtime_idle, 1442 .runtime_idle = dma_runtime_idle,
1443 .suspend = dma_suspend,
1444 .resume = dma_resume,
1432}; 1445};
1433 1446
1434static struct pci_driver intel_mid_dma_pci_driver = { 1447static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
1437 .probe = intel_mid_dma_probe, 1450 .probe = intel_mid_dma_probe,
1438 .remove = __devexit_p(intel_mid_dma_remove), 1451 .remove = __devexit_p(intel_mid_dma_remove),
1439#ifdef CONFIG_PM 1452#ifdef CONFIG_PM
1440 .suspend = dma_suspend,
1441 .resume = dma_resume,
1442 .driver = { 1453 .driver = {
1443 .pm = &intel_mid_dma_pm, 1454 .pm = &intel_mid_dma_pm,
1444 }, 1455 },
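The intel_mid_dma PM rework is the standard conversion from legacy pci_driver .suspend/.resume callbacks to dev_pm_ops: the callbacks now receive a struct device * and recover the PCI device with to_pci_dev(). The shape of the conversion, sketched with hypothetical foo_* names:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);  /* dev_pm_ops hand us a device */

        /* ... quiesce channels, save state, gate clocks via pci ... */
        (void)pci;
        return 0;
}

static int foo_resume(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);

        /* ... restore state ... */
        (void)pci;
        return 0;
}

static const struct dev_pm_ops foo_pm = {
        .suspend = foo_suspend,         /* replaces pci_driver.suspend */
        .resume  = foo_resume,          /* replaces pci_driver.resume */
        /* runtime_* hooks can live in the same table */
};
/* wired up through .driver.pm = &foo_pm in the pci_driver definition */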
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index aea5ee88ce0..c83d35b97bd 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
262 unsigned int lli_length; 262 unsigned int lli_length;
263 unsigned int current_lli; 263 unsigned int current_lli;
264 dma_addr_t next; 264 dma_addr_t next;
265 enum dma_data_direction dirn; 265 enum dma_transfer_direction dirn;
266 enum dma_status status; 266 enum dma_status status;
267 enum dma_slave_buswidth width; /*width of DMA txn*/ 267 enum dma_slave_buswidth width; /*width of DMA txn*/
268 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ 268 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
296} 296}
297 297
298 298
299int dma_resume(struct pci_dev *pci); 299int dma_resume(struct device *dev);
300 300
301#endif /*__INTEL_MID_DMAC_REGS_H__*/ 301#endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e03f811a83d..04be90b645b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1735 spin_unlock_bh(&iop_chan->lock); 1735 spin_unlock_bh(&iop_chan->lock);
1736} 1736}
1737 1737
1738MODULE_ALIAS("platform:iop-adma");
1739
1740static struct platform_driver iop_adma_driver = { 1738static struct platform_driver iop_adma_driver = {
1741 .probe = iop_adma_probe, 1739 .probe = iop_adma_probe,
1742 .remove = __devexit_p(iop_adma_remove), 1740 .remove = __devexit_p(iop_adma_remove),
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = {
1746 }, 1744 },
1747}; 1745};
1748 1746
1749static int __init iop_adma_init (void) 1747module_platform_driver(iop_adma_driver);
1750{
1751 return platform_driver_register(&iop_adma_driver);
1752}
1753
1754static void __exit iop_adma_exit (void)
1755{
1756 platform_driver_unregister(&iop_adma_driver);
1757 return;
1758}
1759module_exit(iop_adma_exit);
1760module_init(iop_adma_init);
1761 1748
1762MODULE_AUTHOR("Intel Corporation"); 1749MODULE_AUTHOR("Intel Corporation");
1763MODULE_DESCRIPTION("IOP ADMA Engine Driver"); 1750MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1764MODULE_LICENSE("GPL"); 1751MODULE_LICENSE("GPL");
1752MODULE_ALIAS("platform:iop-adma");
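module_platform_driver() is pure boilerplate removal; the macro expands to essentially the init/exit pair this hunk deletes:

/* Roughly what module_platform_driver(iop_adma_driver) stands for: */
static int __init iop_adma_driver_init(void)
{
        return platform_driver_register(&iop_adma_driver);
}
module_init(iop_adma_driver_init);

static void __exit iop_adma_driver_exit(void)
{
        platform_driver_unregister(&iop_adma_driver);
}
module_exit(iop_adma_driver_exit);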
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 0e5ef33f90a..6212b16e8cf 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
312 case IPU_PIX_FMT_RGB565: 312 case IPU_PIX_FMT_RGB565:
313 params->ip.bpp = 2; 313 params->ip.bpp = 2;
314 params->ip.pfs = 4; 314 params->ip.pfs = 4;
315 params->ip.npb = 7; 315 params->ip.npb = 15;
316 params->ip.sat = 2; /* SAT = 32-bit access */ 316 params->ip.sat = 2; /* SAT = 32-bit access */
317 params->ip.ofs0 = 0; /* Red bit offset */ 317 params->ip.ofs0 = 0; /* Red bit offset */
318 params->ip.ofs1 = 5; /* Green bit offset */ 318 params->ip.ofs1 = 5; /* Green bit offset */
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
422 params->pp.nsb = 1; 422 params->pp.nsb = 1;
423} 423}
424 424
425static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
426 uint16_t burst_pixels)
427{
428 params->pp.npb = burst_pixels - 1;
429}
430
431static void ipu_ch_param_set_buffer(union chan_param_mem *params, 425static void ipu_ch_param_set_buffer(union chan_param_mem *params,
432 dma_addr_t buf0, dma_addr_t buf1) 426 dma_addr_t buf0, dma_addr_t buf1)
433{ 427{
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
690 ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes); 684 ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
691 ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1); 685 ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
692 ipu_ch_param_set_rotation(&params, rot_mode); 686 ipu_ch_param_set_rotation(&params, rot_mode);
693 /* Some channels (rotation) have restriction on burst length */
694 switch (channel) {
695 case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
696 invalid - Table 44-30 */
697/*
698 ipu_ch_param_set_burst_size(&params, 8);
699 */
700 break;
701 case IDMAC_SDC_0:
702 case IDMAC_SDC_1:
703 /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
704 ipu_ch_param_set_burst_size(&params, 16);
705 break;
706 case IDMAC_IC_0:
707 default:
708 break;
709 }
710 687
711 spin_lock_irqsave(&ipu->lock, flags); 688 spin_lock_irqsave(&ipu->lock, flags);
712 689
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg)
1364/* Allocate and initialise a transfer descriptor. */ 1341/* Allocate and initialise a transfer descriptor. */
1365static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, 1342static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
1366 struct scatterlist *sgl, unsigned int sg_len, 1343 struct scatterlist *sgl, unsigned int sg_len,
1367 enum dma_data_direction direction, unsigned long tx_flags) 1344 enum dma_transfer_direction direction, unsigned long tx_flags)
1368{ 1345{
1369 struct idmac_channel *ichan = to_idmac_chan(chan); 1346 struct idmac_channel *ichan = to_idmac_chan(chan);
1370 struct idmac_tx_desc *desc = NULL; 1347 struct idmac_tx_desc *desc = NULL;
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
1376 chan->chan_id != IDMAC_IC_7) 1353 chan->chan_id != IDMAC_IC_7)
1377 return NULL; 1354 return NULL;
1378 1355
1379 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { 1356 if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
1380 dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); 1357 dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
1381 return NULL; 1358 return NULL;
1382 } 1359 }
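
Two related ipu_idmac changes: the per-channel burst-size plumbing is removed, and the RGB565 case bakes the burst into the channel parameters directly. If I read the deleted helper correctly, npb stores burst_pixels - 1, so raising npb from 7 to 15 takes the burst from 8 to 16 pixels — the value the removed IDMAC_SDC_0/1 case used to program:

	#include <linux/types.h>

	/* Encoding used by the deleted helper, for reference: */
	static inline u16 npb_for_burst(u16 burst_pixels)
	{
		return burst_pixels - 1;	/* 16 pixels -> npb = 15 */
	}
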
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 8ba4edc6185..4d6d4cf6694 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = {
835 }, 835 },
836}; 836};
837 837
838static int __init mpc_dma_init(void) 838module_platform_driver(mpc_dma_driver);
839{
840 return platform_driver_register(&mpc_dma_driver);
841}
842module_init(mpc_dma_init);
843
844static void __exit mpc_dma_exit(void)
845{
846 platform_driver_unregister(&mpc_dma_driver);
847}
848module_exit(mpc_dma_exit);
849 839
850MODULE_LICENSE("GPL"); 840MODULE_LICENSE("GPL");
851MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); 841MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index fc903c0ed23..b06cd4ca626 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -44,7 +44,6 @@
44#define HW_APBHX_CTRL0 0x000 44#define HW_APBHX_CTRL0 0x000
45#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) 45#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
46#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) 46#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
47#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
48#define BP_APBH_CTRL0_RESET_CHANNEL 16 47#define BP_APBH_CTRL0_RESET_CHANNEL 16
49#define HW_APBHX_CTRL1 0x010 48#define HW_APBHX_CTRL1 0x010
50#define HW_APBHX_CTRL2 0x020 49#define HW_APBHX_CTRL2 0x020
@@ -111,6 +110,7 @@ struct mxs_dma_chan {
111 int chan_irq; 110 int chan_irq;
112 struct mxs_dma_ccw *ccw; 111 struct mxs_dma_ccw *ccw;
113 dma_addr_t ccw_phys; 112 dma_addr_t ccw_phys;
113 int desc_count;
114 dma_cookie_t last_completed; 114 dma_cookie_t last_completed;
115 enum dma_status status; 115 enum dma_status status;
116 unsigned int flags; 116 unsigned int flags;
@@ -130,23 +130,6 @@ struct mxs_dma_engine {
130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
131}; 131};
132 132
133static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
134{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
136 int chan_id = mxs_chan->chan.chan_id;
137 int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
138
139 /* enable apbh channel clock */
140 if (dma_is_apbh()) {
141 if (apbh_is_old())
142 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
143 mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
144 else
145 writel(1 << chan_id,
146 mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
147 }
148}
149
150static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 133static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
151{ 134{
152 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
165 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 148 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
166 int chan_id = mxs_chan->chan.chan_id; 149 int chan_id = mxs_chan->chan.chan_id;
167 150
168 /* clkgate needs to be enabled before writing other registers */
169 mxs_dma_clkgate(mxs_chan, 1);
170
171 /* set cmd_addr up */ 151 /* set cmd_addr up */
172 writel(mxs_chan->ccw_phys, 152 writel(mxs_chan->ccw_phys,
173 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); 153 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
178 158
179static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 159static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
180{ 160{
181 /* disable apbh channel clock */
182 mxs_dma_clkgate(mxs_chan, 0);
183
184 mxs_chan->status = DMA_SUCCESS; 161 mxs_chan->status = DMA_SUCCESS;
185} 162}
186 163
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
268 /* 245 /*
269 * When both completion and error of termination bits set at the 246 * When both completion and error of termination bits set at the
270 * same time, we do not take it as an error. IOW, it only becomes 247 * same time, we do not take it as an error. IOW, it only becomes
271 * an error we need to handler here in case of ether it's (1) an bus 248 * an error we need to handle here in case of either it's (1) a bus
272 * error or (2) a termination error with no completion. 249 * error or (2) a termination error with no completion.
273 */ 250 */
274 stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ 251 stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
338 if (ret) 315 if (ret)
339 goto err_clk; 316 goto err_clk;
340 317
341 /* clkgate needs to be enabled for reset to finish */
342 mxs_dma_clkgate(mxs_chan, 1);
343 mxs_dma_reset_chan(mxs_chan); 318 mxs_dma_reset_chan(mxs_chan);
344 mxs_dma_clkgate(mxs_chan, 0);
345 319
346 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 320 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
347 mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 321 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
377 351
378static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( 352static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
379 struct dma_chan *chan, struct scatterlist *sgl, 353 struct dma_chan *chan, struct scatterlist *sgl,
380 unsigned int sg_len, enum dma_data_direction direction, 354 unsigned int sg_len, enum dma_transfer_direction direction,
381 unsigned long append) 355 unsigned long append)
382{ 356{
383 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 357 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
386 struct scatterlist *sg; 360 struct scatterlist *sg;
387 int i, j; 361 int i, j;
388 u32 *pio; 362 u32 *pio;
389 static int idx; 363 int idx = append ? mxs_chan->desc_count : 0;
390 364
391 if (mxs_chan->status == DMA_IN_PROGRESS && !append) 365 if (mxs_chan->status == DMA_IN_PROGRESS && !append)
392 return NULL; 366 return NULL;
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
417 idx = 0; 391 idx = 0;
418 } 392 }
419 393
420 if (direction == DMA_NONE) { 394 if (direction == DMA_TRANS_NONE) {
421 ccw = &mxs_chan->ccw[idx++]; 395 ccw = &mxs_chan->ccw[idx++];
422 pio = (u32 *) sgl; 396 pio = (u32 *) sgl;
423 397
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
450 ccw->bits |= CCW_CHAIN; 424 ccw->bits |= CCW_CHAIN;
451 ccw->bits |= CCW_HALT_ON_TERM; 425 ccw->bits |= CCW_HALT_ON_TERM;
452 ccw->bits |= CCW_TERM_FLUSH; 426 ccw->bits |= CCW_TERM_FLUSH;
453 ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? 427 ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
454 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, 428 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
455 COMMAND); 429 COMMAND);
456 430
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
462 } 436 }
463 } 437 }
464 } 438 }
439 mxs_chan->desc_count = idx;
465 440
466 return &mxs_chan->desc; 441 return &mxs_chan->desc;
467 442
@@ -472,7 +447,7 @@ err_out:
472 447
473static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( 448static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
474 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 449 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
475 size_t period_len, enum dma_data_direction direction) 450 size_t period_len, enum dma_transfer_direction direction)
476{ 451{
477 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 452 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
478 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 453 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
515 ccw->bits |= CCW_IRQ; 490 ccw->bits |= CCW_IRQ;
516 ccw->bits |= CCW_HALT_ON_TERM; 491 ccw->bits |= CCW_HALT_ON_TERM;
517 ccw->bits |= CCW_TERM_FLUSH; 492 ccw->bits |= CCW_TERM_FLUSH;
518 ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? 493 ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
519 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); 494 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
520 495
521 dma_addr += period_len; 496 dma_addr += period_len;
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
523 498
524 i++; 499 i++;
525 } 500 }
501 mxs_chan->desc_count = i;
526 502
527 return &mxs_chan->desc; 503 return &mxs_chan->desc;
528 504
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
539 515
540 switch (cmd) { 516 switch (cmd) {
541 case DMA_TERMINATE_ALL: 517 case DMA_TERMINATE_ALL:
542 mxs_dma_disable_chan(mxs_chan);
543 mxs_dma_reset_chan(mxs_chan); 518 mxs_dma_reset_chan(mxs_chan);
519 mxs_dma_disable_chan(mxs_chan);
544 break; 520 break;
545 case DMA_PAUSE: 521 case DMA_PAUSE:
546 mxs_dma_pause_chan(mxs_chan); 522 mxs_dma_pause_chan(mxs_chan);
@@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
580 556
581 ret = clk_prepare_enable(mxs_dma->clk); 557 ret = clk_prepare_enable(mxs_dma->clk);
582 if (ret) 558 if (ret)
583 goto err_out; 559 return ret;
584 560
585 ret = mxs_reset_block(mxs_dma->base); 561 ret = mxs_reset_block(mxs_dma->base);
586 if (ret) 562 if (ret)
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
604 writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, 580 writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
605 mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); 581 mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
606 582
607 clk_disable_unprepare(mxs_dma->clk);
608
609 return 0;
610
611err_out: 583err_out:
584 clk_disable_unprepare(mxs_dma->clk);
612 return ret; 585 return ret;
613} 586}
614 587
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index a6d0e3dbed0..823f58179f9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Topcliff PCH DMA controller driver 2 * Topcliff PCH DMA controller driver
3 * Copyright (c) 2010 Intel Corporation 3 * Copyright (c) 2010 Intel Corporation
4 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. 4 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -99,7 +99,7 @@ struct pch_dma_desc {
99struct pch_dma_chan { 99struct pch_dma_chan {
100 struct dma_chan chan; 100 struct dma_chan chan;
101 void __iomem *membase; 101 void __iomem *membase;
102 enum dma_data_direction dir; 102 enum dma_transfer_direction dir;
103 struct tasklet_struct tasklet; 103 struct tasklet_struct tasklet;
104 unsigned long err_status; 104 unsigned long err_status;
105 105
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
224 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << 224 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
225 (DMA_CTL0_BITS_PER_CH * chan->chan_id)); 225 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
226 val &= mask_mode; 226 val &= mask_mode;
227 if (pd_chan->dir == DMA_TO_DEVICE) 227 if (pd_chan->dir == DMA_MEM_TO_DEV)
228 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + 228 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
229 DMA_CTL0_DIR_SHIFT_BITS); 229 DMA_CTL0_DIR_SHIFT_BITS);
230 else 230 else
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
242 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << 242 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
243 (DMA_CTL0_BITS_PER_CH * ch)); 243 (DMA_CTL0_BITS_PER_CH * ch));
244 val &= mask_mode; 244 val &= mask_mode;
245 if (pd_chan->dir == DMA_TO_DEVICE) 245 if (pd_chan->dir == DMA_MEM_TO_DEV)
246 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + 246 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
247 DMA_CTL0_DIR_SHIFT_BITS); 247 DMA_CTL0_DIR_SHIFT_BITS);
248 else 248 else
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
607 607
608static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, 608static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
609 struct scatterlist *sgl, unsigned int sg_len, 609 struct scatterlist *sgl, unsigned int sg_len,
610 enum dma_data_direction direction, unsigned long flags) 610 enum dma_transfer_direction direction, unsigned long flags)
611{ 611{
612 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 612 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
613 struct pch_dma_slave *pd_slave = chan->private; 613 struct pch_dma_slave *pd_slave = chan->private;
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
623 return NULL; 623 return NULL;
624 } 624 }
625 625
626 if (direction == DMA_FROM_DEVICE) 626 if (direction == DMA_DEV_TO_MEM)
627 reg = pd_slave->rx_reg; 627 reg = pd_slave->rx_reg;
628 else if (direction == DMA_TO_DEVICE) 628 else if (direction == DMA_MEM_TO_DEV)
629 reg = pd_slave->tx_reg; 629 reg = pd_slave->tx_reg;
630 else 630 else
631 return NULL; 631 return NULL;
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
1018#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E 1018#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
1019#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 1019#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
1020#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B 1020#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
1021#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
1022#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
1021 1023
1022DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { 1024DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
1023 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, 1025 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
1030 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ 1032 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
1031 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ 1033 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
1032 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ 1034 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
1035 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
1036 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
1033 { 0, }, 1037 { 0, },
1034}; 1038};
1035 1039
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
1057module_init(pch_dma_init); 1061module_init(pch_dma_init);
1058module_exit(pch_dma_exit); 1062module_exit(pch_dma_exit);
1059 1063
1060MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " 1064MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
1061 "DMA controller driver"); 1065 "DMA controller driver");
1062MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); 1066MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
1063MODULE_LICENSE("GPL v2"); 1067MODULE_LICENSE("GPL v2");
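
The trailing number in each ID-table entry is pci_device_id.driver_data, which this driver uses as the channel count; the two new ML7831 entries follow that convention (8 UART channels, 4 SPI). Probe then recovers the count from the matched entry — a sketch with made-up foo_ names:

	#include <linux/pci.h>

	static int __devinit foo_probe(struct pci_dev *pdev,
				       const struct pci_device_id *id)
	{
		unsigned int nr_channels = id->driver_data;	/* 8 or 4 above */

		/* size per-channel allocations from nr_channels ... */
		return 0;
	}
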
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 09adcfcd953..b8ec03ee8e2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
350 case DMA_SLAVE_CONFIG: 350 case DMA_SLAVE_CONFIG:
351 slave_config = (struct dma_slave_config *)arg; 351 slave_config = (struct dma_slave_config *)arg;
352 352
353 if (slave_config->direction == DMA_TO_DEVICE) { 353 if (slave_config->direction == DMA_MEM_TO_DEV) {
354 if (slave_config->dst_addr) 354 if (slave_config->dst_addr)
355 pch->fifo_addr = slave_config->dst_addr; 355 pch->fifo_addr = slave_config->dst_addr;
356 if (slave_config->dst_addr_width) 356 if (slave_config->dst_addr_width)
357 pch->burst_sz = __ffs(slave_config->dst_addr_width); 357 pch->burst_sz = __ffs(slave_config->dst_addr_width);
358 if (slave_config->dst_maxburst) 358 if (slave_config->dst_maxburst)
359 pch->burst_len = slave_config->dst_maxburst; 359 pch->burst_len = slave_config->dst_maxburst;
360 } else if (slave_config->direction == DMA_FROM_DEVICE) { 360 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
361 if (slave_config->src_addr) 361 if (slave_config->src_addr)
362 pch->fifo_addr = slave_config->src_addr; 362 pch->fifo_addr = slave_config->src_addr;
363 if (slave_config->src_addr_width) 363 if (slave_config->src_addr_width)
@@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
621 621
622static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( 622static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
623 struct dma_chan *chan, dma_addr_t dma_addr, size_t len, 623 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
624 size_t period_len, enum dma_data_direction direction) 624 size_t period_len, enum dma_transfer_direction direction)
625{ 625{
626 struct dma_pl330_desc *desc; 626 struct dma_pl330_desc *desc;
627 struct dma_pl330_chan *pch = to_pchan(chan); 627 struct dma_pl330_chan *pch = to_pchan(chan);
@@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
636 } 636 }
637 637
638 switch (direction) { 638 switch (direction) {
639 case DMA_TO_DEVICE: 639 case DMA_MEM_TO_DEV:
640 desc->rqcfg.src_inc = 1; 640 desc->rqcfg.src_inc = 1;
641 desc->rqcfg.dst_inc = 0; 641 desc->rqcfg.dst_inc = 0;
642 desc->req.rqtype = MEMTODEV; 642 desc->req.rqtype = MEMTODEV;
643 src = dma_addr; 643 src = dma_addr;
644 dst = pch->fifo_addr; 644 dst = pch->fifo_addr;
645 break; 645 break;
646 case DMA_FROM_DEVICE: 646 case DMA_DEV_TO_MEM:
647 desc->rqcfg.src_inc = 0; 647 desc->rqcfg.src_inc = 0;
648 desc->rqcfg.dst_inc = 1; 648 desc->rqcfg.dst_inc = 1;
649 desc->req.rqtype = DEVTOMEM; 649 desc->req.rqtype = DEVTOMEM;
@@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
710 710
711static struct dma_async_tx_descriptor * 711static struct dma_async_tx_descriptor *
712pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 712pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
713 unsigned int sg_len, enum dma_data_direction direction, 713 unsigned int sg_len, enum dma_transfer_direction direction,
714 unsigned long flg) 714 unsigned long flg)
715{ 715{
716 struct dma_pl330_desc *first, *desc = NULL; 716 struct dma_pl330_desc *first, *desc = NULL;
@@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
759 else 759 else
760 list_add_tail(&desc->node, &first->node); 760 list_add_tail(&desc->node, &first->node);
761 761
762 if (direction == DMA_TO_DEVICE) { 762 if (direction == DMA_MEM_TO_DEV) {
763 desc->rqcfg.src_inc = 1; 763 desc->rqcfg.src_inc = 1;
764 desc->rqcfg.dst_inc = 0; 764 desc->rqcfg.dst_inc = 0;
765 desc->req.rqtype = MEMTODEV; 765 desc->req.rqtype = MEMTODEV;
@@ -834,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
834 834
835 amba_set_drvdata(adev, pdmac); 835 amba_set_drvdata(adev, pdmac);
836 836
837#ifdef CONFIG_PM_RUNTIME 837#ifndef CONFIG_PM_RUNTIME
838 /* to use the runtime PM helper functions */
839 pm_runtime_enable(&adev->dev);
840
841 /* enable the power domain */
842 if (pm_runtime_get_sync(&adev->dev)) {
843 dev_err(&adev->dev, "failed to get runtime pm\n");
844 ret = -ENODEV;
845 goto probe_err1;
846 }
847#else
848 /* enable dma clk */ 838 /* enable dma clk */
849 clk_enable(pdmac->clk); 839 clk_enable(pdmac->clk);
850#endif 840#endif
@@ -977,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev)
977 res = &adev->res; 967 res = &adev->res;
978 release_mem_region(res->start, resource_size(res)); 968 release_mem_region(res->start, resource_size(res));
979 969
980#ifdef CONFIG_PM_RUNTIME 970#ifndef CONFIG_PM_RUNTIME
981 pm_runtime_put(&adev->dev);
982 pm_runtime_disable(&adev->dev);
983#else
984 clk_disable(pdmac->clk); 971 clk_disable(pdmac->clk);
985#endif 972#endif
986 973
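
Flipping these guards from #ifdef to #ifndef CONFIG_PM_RUNTIME inverts the responsibility: with runtime PM compiled in, the driver no longer calls pm_runtime_enable()/pm_runtime_get_sync() itself — power is assumed to be handled by the runtime-PM core and the AMBA bus — and the explicit clk_enable()/clk_disable() pair remains only for the non-runtime-PM build. The resulting shape, sketched with hypothetical names:

	#include <linux/amba/bus.h>
	#include <linux/clk.h>

	struct foo_dmac {
		struct clk *clk;
	};

	static int foo_probe(struct amba_device *adev, struct foo_dmac *d)
	{
	#ifndef CONFIG_PM_RUNTIME
		/* No runtime PM: the driver gates its clock directly. */
		clk_enable(d->clk);
	#endif
		/* With CONFIG_PM_RUNTIME, clock/power gating is left to the
		 * runtime-PM core, so no explicit calls here. */
		return 0;
	}

	static void foo_remove(struct foo_dmac *d)
	{
	#ifndef CONFIG_PM_RUNTIME
		clk_disable(d->clk);
	#endif
	}
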
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 81809c2b46a..54043cd831c 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,7 +23,6 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/dmaengine.h> 24#include <linux/dmaengine.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h> 26#include <linux/platform_device.h>
28#include <linux/pm_runtime.h> 27#include <linux/pm_runtime.h>
29#include <linux/sh_dma.h> 28#include <linux/sh_dma.h>
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
57static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; 56static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
58 57
59static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); 58static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
59static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
60
61static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
62{
63 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
64
65 __raw_writel(data, shdev->chan_reg +
66 shdev->pdata->channel[sh_dc->id].chclr_offset);
67}
60 68
61static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) 69static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
62{ 70{
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
129 137
130 dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); 138 dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
131 139
140 if (shdev->pdata->chclr_present) {
141 int i;
142 for (i = 0; i < shdev->pdata->channel_num; i++) {
143 struct sh_dmae_chan *sh_chan = shdev->chan[i];
144 if (sh_chan)
145 chclr_write(sh_chan, 0);
146 }
147 }
148
132 dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); 149 dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
133 150
134 dmaor = dmaor_read(shdev); 151 dmaor = dmaor_read(shdev);
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
139 dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); 156 dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
140 return -EIO; 157 return -EIO;
141 } 158 }
159 if (shdev->pdata->dmaor_init & ~dmaor)
160 dev_warn(shdev->common.dev,
161 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
162 dmaor, shdev->pdata->dmaor_init);
142 return 0; 163 return 0;
143} 164}
144 165
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
259 return 0; 280 return 0;
260} 281}
261 282
262static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
263
264static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) 283static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
265{ 284{
266 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; 285 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
340 sh_chan_xfer_ld_queue(sh_chan); 359 sh_chan_xfer_ld_queue(sh_chan);
341 sh_chan->pm_state = DMAE_PM_ESTABLISHED; 360 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
342 } 361 }
362 } else {
363 sh_chan->pm_state = DMAE_PM_PENDING;
343 } 364 }
344 365
345 spin_unlock_irq(&sh_chan->desc_lock); 366 spin_unlock_irq(&sh_chan->desc_lock);
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
479 * @sh_chan: DMA channel 500 * @sh_chan: DMA channel
480 * @flags: DMA transfer flags 501 * @flags: DMA transfer flags
481 * @dest: destination DMA address, incremented when direction equals 502 * @dest: destination DMA address, incremented when direction equals
482 * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL 503 * DMA_DEV_TO_MEM
483 * @src: source DMA address, incremented when direction equals 504 * @src: source DMA address, incremented when direction equals
484 * DMA_TO_DEVICE or DMA_BIDIRECTIONAL 505 * DMA_MEM_TO_DEV
485 * @len: DMA transfer length 506 * @len: DMA transfer length
486 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY 507 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
487 * @direction: needed for slave DMA to decide which address to keep constant, 508 * @direction: needed for slave DMA to decide which address to keep constant,
488 * equals DMA_BIDIRECTIONAL for MEMCPY 509 * equals DMA_MEM_TO_MEM for MEMCPY
489 * Returns 0 or an error 510 * Returns 0 or an error
490 * Locks: called with desc_lock held 511 * Locks: called with desc_lock held
491 */ 512 */
492static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, 513static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
493 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, 514 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
494 struct sh_desc **first, enum dma_data_direction direction) 515 struct sh_desc **first, enum dma_transfer_direction direction)
495{ 516{
496 struct sh_desc *new; 517 struct sh_desc *new;
497 size_t copy_size; 518 size_t copy_size;
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
531 new->direction = direction; 552 new->direction = direction;
532 553
533 *len -= copy_size; 554 *len -= copy_size;
534 if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) 555 if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
535 *src += copy_size; 556 *src += copy_size;
536 if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) 557 if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
537 *dest += copy_size; 558 *dest += copy_size;
538 559
539 return new; 560 return new;
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
546 * converted to scatter-gather to guarantee consistent locking and a correct 567 * converted to scatter-gather to guarantee consistent locking and a correct
547 * list manipulation. For slave DMA direction carries the usual meaning, and, 568 * list manipulation. For slave DMA direction carries the usual meaning, and,
548 * logically, the SG list is RAM and the addr variable contains slave address, 569 * logically, the SG list is RAM and the addr variable contains slave address,
549 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL 570 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
550 * and the SG list contains only one element and points at the source buffer. 571 * and the SG list contains only one element and points at the source buffer.
551 */ 572 */
552static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, 573static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
553 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, 574 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
554 enum dma_data_direction direction, unsigned long flags) 575 enum dma_transfer_direction direction, unsigned long flags)
555{ 576{
556 struct scatterlist *sg; 577 struct scatterlist *sg;
557 struct sh_desc *first = NULL, *new = NULL /* compiler... */; 578 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
592 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", 613 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
593 i, sg, len, (unsigned long long)sg_addr); 614 i, sg, len, (unsigned long long)sg_addr);
594 615
595 if (direction == DMA_FROM_DEVICE) 616 if (direction == DMA_DEV_TO_MEM)
596 new = sh_dmae_add_desc(sh_chan, flags, 617 new = sh_dmae_add_desc(sh_chan, flags,
597 &sg_addr, addr, &len, &first, 618 &sg_addr, addr, &len, &first,
598 direction); 619 direction);
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
646 sg_dma_address(&sg) = dma_src; 667 sg_dma_address(&sg) = dma_src;
647 sg_dma_len(&sg) = len; 668 sg_dma_len(&sg) = len;
648 669
649 return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, 670 return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
650 flags); 671 flags);
651} 672}
652 673
653static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( 674static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
654 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 675 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
655 enum dma_data_direction direction, unsigned long flags) 676 enum dma_transfer_direction direction, unsigned long flags)
656{ 677{
657 struct sh_dmae_slave *param; 678 struct sh_dmae_slave *param;
658 struct sh_dmae_chan *sh_chan; 679 struct sh_dmae_chan *sh_chan;
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
996 spin_lock_irq(&sh_chan->desc_lock); 1017 spin_lock_irq(&sh_chan->desc_lock);
997 list_for_each_entry(desc, &sh_chan->ld_queue, node) { 1018 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
998 if (desc->mark == DESC_SUBMITTED && 1019 if (desc->mark == DESC_SUBMITTED &&
999 ((desc->direction == DMA_FROM_DEVICE && 1020 ((desc->direction == DMA_DEV_TO_MEM &&
1000 (desc->hw.dar + desc->hw.tcr) == dar_buf) || 1021 (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
1001 (desc->hw.sar + desc->hw.tcr) == sar_buf)) { 1022 (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
1002 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", 1023 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1225 1246
1226 platform_set_drvdata(pdev, shdev); 1247 platform_set_drvdata(pdev, shdev);
1227 1248
1249 shdev->common.dev = &pdev->dev;
1250
1228 pm_runtime_enable(&pdev->dev); 1251 pm_runtime_enable(&pdev->dev);
1229 pm_runtime_get_sync(&pdev->dev); 1252 pm_runtime_get_sync(&pdev->dev);
1230 1253
@@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1254 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; 1277 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
1255 shdev->common.device_control = sh_dmae_control; 1278 shdev->common.device_control = sh_dmae_control;
1256 1279
1257 shdev->common.dev = &pdev->dev;
1258 /* Default transfer size of 32 bytes requires 32-byte alignment */ 1280 /* Default transfer size of 32 bytes requires 32-byte alignment */
1259 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; 1281 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
1260 1282
@@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
1435#ifdef CONFIG_PM 1457#ifdef CONFIG_PM
1436static int sh_dmae_suspend(struct device *dev) 1458static int sh_dmae_suspend(struct device *dev)
1437{ 1459{
1438 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1439 int i;
1440
1441 for (i = 0; i < shdev->pdata->channel_num; i++) {
1442 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1443 if (sh_chan->descs_allocated)
1444 sh_chan->pm_error = pm_runtime_put_sync(dev);
1445 }
1446
1447 return 0; 1460 return 0;
1448} 1461}
1449 1462
1450static int sh_dmae_resume(struct device *dev) 1463static int sh_dmae_resume(struct device *dev)
1451{ 1464{
1452 struct sh_dmae_device *shdev = dev_get_drvdata(dev); 1465 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1453 int i; 1466 int i, ret;
1467
1468 ret = sh_dmae_rst(shdev);
1469 if (ret < 0)
1470 dev_err(dev, "Failed to reset!\n");
1454 1471
1455 for (i = 0; i < shdev->pdata->channel_num; i++) { 1472 for (i = 0; i < shdev->pdata->channel_num; i++) {
1456 struct sh_dmae_chan *sh_chan = shdev->chan[i]; 1473 struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev)
1459 if (!sh_chan->descs_allocated) 1476 if (!sh_chan->descs_allocated)
1460 continue; 1477 continue;
1461 1478
1462 if (!sh_chan->pm_error)
1463 pm_runtime_get_sync(dev);
1464
1465 if (param) { 1479 if (param) {
1466 const struct sh_dmae_slave_config *cfg = param->config; 1480 const struct sh_dmae_slave_config *cfg = param->config;
1467 dmae_set_dmars(sh_chan, cfg->mid_rid); 1481 dmae_set_dmars(sh_chan, cfg->mid_rid);
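
Among the shdma changes, the DMAOR hunk adds a write-then-read-back check: after programming dmaor_init, any requested bit still clear in the readback triggers a warning rather than a hard failure. The same defensive pattern in isolation — a generic sketch, not a driver API:

	#include <linux/io.h>
	#include <linux/printk.h>

	/* Set 'want' bits in a control register; warn if any fail to latch. */
	static void set_and_verify(void __iomem *reg, u32 want)
	{
		u32 got;

		writel(readl(reg) | want, reg);
		got = readl(reg);
		if (want & ~got)
			pr_warn("bits 0x%x did not latch (read back 0x%x)\n",
				want & ~got, got);
	}
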
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644
index 00000000000..2333810d168
--- /dev/null
+++ b/drivers/dma/sirf-dma.c
@@ -0,0 +1,707 @@
1/*
2 * DMA controller driver for CSR SiRFprimaII
3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
5 *
6 * Licensed under GPLv2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/dmaengine.h>
11#include <linux/dma-mapping.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/slab.h>
15#include <linux/of_irq.h>
16#include <linux/of_address.h>
17#include <linux/of_device.h>
18#include <linux/of_platform.h>
19#include <linux/sirfsoc_dma.h>
20
21#define SIRFSOC_DMA_DESCRIPTORS 16
22#define SIRFSOC_DMA_CHANNELS 16
23
24#define SIRFSOC_DMA_CH_ADDR 0x00
25#define SIRFSOC_DMA_CH_XLEN 0x04
26#define SIRFSOC_DMA_CH_YLEN 0x08
27#define SIRFSOC_DMA_CH_CTRL 0x0C
28
29#define SIRFSOC_DMA_WIDTH_0 0x100
30#define SIRFSOC_DMA_CH_VALID 0x140
31#define SIRFSOC_DMA_CH_INT 0x144
32#define SIRFSOC_DMA_INT_EN 0x148
33#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
34
35#define SIRFSOC_DMA_MODE_CTRL_BIT 4
36#define SIRFSOC_DMA_DIR_CTRL_BIT 5
37
38/* the xlen and dma_width registers are programmed in units of 4 bytes */
39#define SIRFSOC_DMA_WORD_LEN 4
40
41struct sirfsoc_dma_desc {
42 struct dma_async_tx_descriptor desc;
43 struct list_head node;
44
45 /* SiRFprimaII 2D-DMA parameters */
46
47 int xlen; /* DMA xlen */
48 int ylen; /* DMA ylen */
49 int width; /* DMA width */
50 int dir;
51 bool cyclic; /* is loop DMA? */
52 u32 addr; /* DMA buffer address */
53};
54
55struct sirfsoc_dma_chan {
56 struct dma_chan chan;
57 struct list_head free;
58 struct list_head prepared;
59 struct list_head queued;
60 struct list_head active;
61 struct list_head completed;
62 dma_cookie_t completed_cookie;
63 unsigned long happened_cyclic;
64 unsigned long completed_cyclic;
65
66 /* Lock for this structure */
67 spinlock_t lock;
68
69 int mode;
70};
71
72struct sirfsoc_dma {
73 struct dma_device dma;
74 struct tasklet_struct tasklet;
75 struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
76 void __iomem *base;
77 int irq;
78};
79
80#define DRV_NAME "sirfsoc_dma"
81
82/* Convert struct dma_chan to struct sirfsoc_dma_chan */
83static inline
84struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
85{
86 return container_of(c, struct sirfsoc_dma_chan, chan);
87}
88
89/* Convert struct dma_chan to struct sirfsoc_dma */
90static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
91{
92 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
93 return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
94}
95
96/* Execute all queued DMA descriptors */
97static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
98{
99 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
100 int cid = schan->chan.chan_id;
101 struct sirfsoc_dma_desc *sdesc = NULL;
102
103 /*
104	 * the lock is already held by the functions calling this, so we
105	 * don't take it again
106 */
107
108 sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
109 node);
110 /* Move the first queued descriptor to active list */
111	list_move_tail(&sdesc->node, &schan->active);
112
113 /* Start the DMA transfer */
114 writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
115 cid * 4);
116 writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
117 (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
118 sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
119 writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
120 SIRFSOC_DMA_CH_XLEN);
121 writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
122 SIRFSOC_DMA_CH_YLEN);
123 writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
124 (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
125
126 /*
127	 * writel has an implicit memory write barrier to make sure data is
128 * flushed into memory before starting DMA
129 */
130 writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
131
132 if (sdesc->cyclic) {
133 writel((1 << cid) | 1 << (cid + 16) |
134 readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
135 sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
136 schan->happened_cyclic = schan->completed_cyclic = 0;
137 }
138}
139
140/* Interrupt handler */
141static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
142{
143 struct sirfsoc_dma *sdma = data;
144 struct sirfsoc_dma_chan *schan;
145 struct sirfsoc_dma_desc *sdesc = NULL;
146 u32 is;
147 int ch;
148
149 is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
150 while ((ch = fls(is) - 1) >= 0) {
151 is &= ~(1 << ch);
152 writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
153 schan = &sdma->channels[ch];
154
155 spin_lock(&schan->lock);
156
157 sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
158 node);
159 if (!sdesc->cyclic) {
160 /* Execute queued descriptors */
161 list_splice_tail_init(&schan->active, &schan->completed);
162 if (!list_empty(&schan->queued))
163 sirfsoc_dma_execute(schan);
164 } else
165 schan->happened_cyclic++;
166
167 spin_unlock(&schan->lock);
168 }
169
170 /* Schedule tasklet */
171 tasklet_schedule(&sdma->tasklet);
172
173 return IRQ_HANDLED;
174}
175
176/* process completed descriptors */
177static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
178{
179 dma_cookie_t last_cookie = 0;
180 struct sirfsoc_dma_chan *schan;
181 struct sirfsoc_dma_desc *sdesc;
182 struct dma_async_tx_descriptor *desc;
183 unsigned long flags;
184 unsigned long happened_cyclic;
185 LIST_HEAD(list);
186 int i;
187
188 for (i = 0; i < sdma->dma.chancnt; i++) {
189 schan = &sdma->channels[i];
190
191 /* Get all completed descriptors */
192 spin_lock_irqsave(&schan->lock, flags);
193 if (!list_empty(&schan->completed)) {
194 list_splice_tail_init(&schan->completed, &list);
195 spin_unlock_irqrestore(&schan->lock, flags);
196
197 /* Execute callbacks and run dependencies */
198 list_for_each_entry(sdesc, &list, node) {
199 desc = &sdesc->desc;
200
201 if (desc->callback)
202 desc->callback(desc->callback_param);
203
204 last_cookie = desc->cookie;
205 dma_run_dependencies(desc);
206 }
207
208 /* Free descriptors */
209 spin_lock_irqsave(&schan->lock, flags);
210 list_splice_tail_init(&list, &schan->free);
211 schan->completed_cookie = last_cookie;
212 spin_unlock_irqrestore(&schan->lock, flags);
213 } else {
214 /* for cyclic channel, desc is always in active list */
215 sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
216 node);
217
218		if (!sdesc || !sdesc->cyclic) {
219 /* without active cyclic DMA */
220 spin_unlock_irqrestore(&schan->lock, flags);
221 continue;
222 }
223
224 /* cyclic DMA */
225 happened_cyclic = schan->happened_cyclic;
226 spin_unlock_irqrestore(&schan->lock, flags);
227
228 desc = &sdesc->desc;
229 while (happened_cyclic != schan->completed_cyclic) {
230 if (desc->callback)
231 desc->callback(desc->callback_param);
232 schan->completed_cyclic++;
233 }
234 }
235 }
236}
237
238/* DMA Tasklet */
239static void sirfsoc_dma_tasklet(unsigned long data)
240{
241 struct sirfsoc_dma *sdma = (void *)data;
242
243 sirfsoc_dma_process_completed(sdma);
244}
245
246/* Submit descriptor to hardware */
247static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
248{
249 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
250 struct sirfsoc_dma_desc *sdesc;
251 unsigned long flags;
252 dma_cookie_t cookie;
253
254 sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
255
256 spin_lock_irqsave(&schan->lock, flags);
257
258 /* Move descriptor to queue */
259 list_move_tail(&sdesc->node, &schan->queued);
260
261 /* Update cookie */
262 cookie = schan->chan.cookie + 1;
263 if (cookie <= 0)
264 cookie = 1;
265
266 schan->chan.cookie = cookie;
267 sdesc->desc.cookie = cookie;
268
269 spin_unlock_irqrestore(&schan->lock, flags);
270
271 return cookie;
272}
273
274static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
275 struct dma_slave_config *config)
276{
277 unsigned long flags;
278
279 if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
280 (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
281 return -EINVAL;
282
283 spin_lock_irqsave(&schan->lock, flags);
284 schan->mode = (config->src_maxburst == 4 ? 1 : 0);
285 spin_unlock_irqrestore(&schan->lock, flags);
286
287 return 0;
288}
289
290static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
291{
292 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
293 int cid = schan->chan.chan_id;
294 unsigned long flags;
295
296 writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
297 ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
298 writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
299
300 writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
301 & ~((1 << cid) | 1 << (cid + 16)),
302 sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
303
304 spin_lock_irqsave(&schan->lock, flags);
305 list_splice_tail_init(&schan->active, &schan->free);
306 list_splice_tail_init(&schan->queued, &schan->free);
307 spin_unlock_irqrestore(&schan->lock, flags);
308
309 return 0;
310}
311
312static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
313 unsigned long arg)
314{
315 struct dma_slave_config *config;
316 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
317
318 switch (cmd) {
319 case DMA_TERMINATE_ALL:
320 return sirfsoc_dma_terminate_all(schan);
321 case DMA_SLAVE_CONFIG:
322 config = (struct dma_slave_config *)arg;
323 return sirfsoc_dma_slave_config(schan, config);
324
325 default:
326 break;
327 }
328
329 return -ENOSYS;
330}
331
332/* Alloc channel resources */
333static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
334{
335 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
336 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
337 struct sirfsoc_dma_desc *sdesc;
338 unsigned long flags;
339 LIST_HEAD(descs);
340 int i;
341
342 /* Alloc descriptors for this channel */
343 for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
344 sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
345 if (!sdesc) {
346 dev_notice(sdma->dma.dev, "Memory allocation error. "
347 "Allocated only %u descriptors\n", i);
348 break;
349 }
350
351 dma_async_tx_descriptor_init(&sdesc->desc, chan);
352 sdesc->desc.flags = DMA_CTRL_ACK;
353 sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
354
355 list_add_tail(&sdesc->node, &descs);
356 }
357
358 /* Return error only if no descriptors were allocated */
359 if (i == 0)
360 return -ENOMEM;
361
362 spin_lock_irqsave(&schan->lock, flags);
363
364 list_splice_tail_init(&descs, &schan->free);
365 spin_unlock_irqrestore(&schan->lock, flags);
366
367 return i;
368}
369
370/* Free channel resources */
371static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
372{
373 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
374 struct sirfsoc_dma_desc *sdesc, *tmp;
375 unsigned long flags;
376 LIST_HEAD(descs);
377
378 spin_lock_irqsave(&schan->lock, flags);
379
380 /* Channel must be idle */
381 BUG_ON(!list_empty(&schan->prepared));
382 BUG_ON(!list_empty(&schan->queued));
383 BUG_ON(!list_empty(&schan->active));
384 BUG_ON(!list_empty(&schan->completed));
385
386 /* Move data */
387 list_splice_tail_init(&schan->free, &descs);
388
389 spin_unlock_irqrestore(&schan->lock, flags);
390
391 /* Free descriptors */
392 list_for_each_entry_safe(sdesc, tmp, &descs, node)
393 kfree(sdesc);
394}
395
396/* Send pending descriptor to hardware */
397static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
398{
399 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
400 unsigned long flags;
401
402 spin_lock_irqsave(&schan->lock, flags);
403
404 if (list_empty(&schan->active) && !list_empty(&schan->queued))
405 sirfsoc_dma_execute(schan);
406
407 spin_unlock_irqrestore(&schan->lock, flags);
408}
409
410/* Check request completion status */
411static enum dma_status
412sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
413 struct dma_tx_state *txstate)
414{
415 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
416 unsigned long flags;
417 dma_cookie_t last_used;
418 dma_cookie_t last_complete;
419
420 spin_lock_irqsave(&schan->lock, flags);
421 last_used = schan->chan.cookie;
422 last_complete = schan->completed_cookie;
423 spin_unlock_irqrestore(&schan->lock, flags);
424
425 dma_set_tx_state(txstate, last_complete, last_used, 0);
426 return dma_async_is_complete(cookie, last_complete, last_used);
427}
428
429static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
430 struct dma_chan *chan, struct dma_interleaved_template *xt,
431 unsigned long flags)
432{
433 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
434 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
435 struct sirfsoc_dma_desc *sdesc = NULL;
436 unsigned long iflags;
437 int ret;
438
439	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
440 ret = -EINVAL;
441 goto err_dir;
442 }
443
444 /* Get free descriptor */
445 spin_lock_irqsave(&schan->lock, iflags);
446 if (!list_empty(&schan->free)) {
447 sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
448 node);
449 list_del(&sdesc->node);
450 }
451 spin_unlock_irqrestore(&schan->lock, iflags);
452
453 if (!sdesc) {
454 /* try to free completed descriptors */
455 sirfsoc_dma_process_completed(sdma);
456 ret = 0;
457 goto no_desc;
458 }
459
460 /* Place descriptor in prepared list */
461 spin_lock_irqsave(&schan->lock, iflags);
462
463 /*
464 * Number of chunks in a frame can only be 1 for prima2
465	 * and ylen (number of frames - 1) must be at least 0
466 */
467 if ((xt->frame_size == 1) && (xt->numf > 0)) {
468 sdesc->cyclic = 0;
469 sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
470 sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
471 SIRFSOC_DMA_WORD_LEN;
472 sdesc->ylen = xt->numf - 1;
473 if (xt->dir == DMA_MEM_TO_DEV) {
474 sdesc->addr = xt->src_start;
475 sdesc->dir = 1;
476 } else {
477 sdesc->addr = xt->dst_start;
478 sdesc->dir = 0;
479 }
480
481 list_add_tail(&sdesc->node, &schan->prepared);
482 } else {
483		pr_err("sirfsoc DMA: invalid interleaved transfer\n");
484 ret = -EINVAL;
485 goto err_xfer;
486 }
487 spin_unlock_irqrestore(&schan->lock, iflags);
488
489 return &sdesc->desc;
490err_xfer:
491 spin_unlock_irqrestore(&schan->lock, iflags);
492no_desc:
493err_dir:
494 return ERR_PTR(ret);
495}
496
497static struct dma_async_tx_descriptor *
498sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
499 size_t buf_len, size_t period_len,
500 enum dma_transfer_direction direction)
501{
502 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
503 struct sirfsoc_dma_desc *sdesc = NULL;
504 unsigned long iflags;
505
506 /*
507	 * we only support cyclic transfers with two periods.
508 * If the X-length is set to 0, it would be the loop mode.
509 * The DMA address keeps increasing until reaching the end of a loop
510 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
511 * the DMA address goes back to the beginning of this area.
512 * In loop mode, the DMA data region is divided into two parts, BUFA
513	 * and BUFB. The DMA controller generates an interrupt twice in each
514	 * loop: when the DMA address reaches the end of BUFA and when it
515	 * reaches the end of BUFB.
516 */
517 if (buf_len != 2 * period_len)
518 return ERR_PTR(-EINVAL);
519
520 /* Get free descriptor */
521 spin_lock_irqsave(&schan->lock, iflags);
522 if (!list_empty(&schan->free)) {
523 sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
524 node);
525 list_del(&sdesc->node);
526 }
527 spin_unlock_irqrestore(&schan->lock, iflags);
528
529 if (!sdesc)
530		return NULL;
531
532 /* Place descriptor in prepared list */
533 spin_lock_irqsave(&schan->lock, iflags);
534 sdesc->addr = addr;
535 sdesc->cyclic = 1;
536 sdesc->xlen = 0;
537 sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
538 sdesc->width = 1;
539 list_add_tail(&sdesc->node, &schan->prepared);
540 spin_unlock_irqrestore(&schan->lock, iflags);
541
542 return &sdesc->desc;
543}
544
545/*
546 * The DMA controller consists of 16 independent DMA channels.
547 * Each channel is allocated to a different function
548 */
549bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
550{
551 unsigned int ch_nr = (unsigned int) chan_id;
552
553 if (ch_nr == chan->chan_id +
554 chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
555 return true;
556
557 return false;
558}
559EXPORT_SYMBOL(sirfsoc_dma_filter_id);
560
561static int __devinit sirfsoc_dma_probe(struct platform_device *op)
562{
563 struct device_node *dn = op->dev.of_node;
564 struct device *dev = &op->dev;
565 struct dma_device *dma;
566 struct sirfsoc_dma *sdma;
567 struct sirfsoc_dma_chan *schan;
568 struct resource res;
569 ulong regs_start, regs_size;
570 u32 id;
571 int ret, i;
572
573 sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
574 if (!sdma) {
575 dev_err(dev, "Memory exhausted!\n");
576 return -ENOMEM;
577 }
578
579 if (of_property_read_u32(dn, "cell-index", &id)) {
580 dev_err(dev, "Fail to get DMAC index\n");
581 ret = -ENODEV;
582 goto free_mem;
583 }
584
585 sdma->irq = irq_of_parse_and_map(dn, 0);
586 if (sdma->irq == NO_IRQ) {
587 dev_err(dev, "Error mapping IRQ!\n");
588 ret = -EINVAL;
589 goto free_mem;
590 }
591
592 ret = of_address_to_resource(dn, 0, &res);
593 if (ret) {
594 dev_err(dev, "Error parsing memory region!\n");
595 goto free_mem;
596 }
597
598 regs_start = res.start;
599 regs_size = resource_size(&res);
600
601 sdma->base = devm_ioremap(dev, regs_start, regs_size);
602 if (!sdma->base) {
603 dev_err(dev, "Error mapping memory region!\n");
604 ret = -ENOMEM;
605 goto irq_dispose;
606 }
607
608 ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
609 sdma);
610 if (ret) {
611 dev_err(dev, "Error requesting IRQ!\n");
612 ret = -EINVAL;
613 goto unmap_mem;
614 }
615
616 dma = &sdma->dma;
617 dma->dev = dev;
618 dma->chancnt = SIRFSOC_DMA_CHANNELS;
619
620 dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
621 dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
622 dma->device_issue_pending = sirfsoc_dma_issue_pending;
623 dma->device_control = sirfsoc_dma_control;
624 dma->device_tx_status = sirfsoc_dma_tx_status;
625 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
626 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
627
628 INIT_LIST_HEAD(&dma->channels);
629 dma_cap_set(DMA_SLAVE, dma->cap_mask);
630 dma_cap_set(DMA_CYCLIC, dma->cap_mask);
631 dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
632 dma_cap_set(DMA_PRIVATE, dma->cap_mask);
633
634 for (i = 0; i < dma->chancnt; i++) {
635 schan = &sdma->channels[i];
636
637 schan->chan.device = dma;
638 schan->chan.cookie = 1;
639 schan->completed_cookie = schan->chan.cookie;
640
641 INIT_LIST_HEAD(&schan->free);
642 INIT_LIST_HEAD(&schan->prepared);
643 INIT_LIST_HEAD(&schan->queued);
644 INIT_LIST_HEAD(&schan->active);
645 INIT_LIST_HEAD(&schan->completed);
646
647 spin_lock_init(&schan->lock);
648 list_add_tail(&schan->chan.device_node, &dma->channels);
649 }
650
651 tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
652
653 /* Register DMA engine */
654 dev_set_drvdata(dev, sdma);
655 ret = dma_async_device_register(dma);
656 if (ret)
657 goto free_irq;
658
659 dev_info(dev, "initialized SIRFSOC DMAC driver\n");
660
661 return 0;
662
663free_irq:
664 devm_free_irq(dev, sdma->irq, sdma);
665irq_dispose:
666 irq_dispose_mapping(sdma->irq);
667unmap_mem:
668 iounmap(sdma->base);
669free_mem:
670 devm_kfree(dev, sdma);
671 return ret;
672}
673
674static int __devexit sirfsoc_dma_remove(struct platform_device *op)
675{
676 struct device *dev = &op->dev;
677 struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
678
679 dma_async_device_unregister(&sdma->dma);
680 devm_free_irq(dev, sdma->irq, sdma);
681 irq_dispose_mapping(sdma->irq);
682 iounmap(sdma->base);
683 devm_kfree(dev, sdma);
684 return 0;
685}
686
687static struct of_device_id sirfsoc_dma_match[] = {
688 { .compatible = "sirf,prima2-dmac", },
689 {},
690};
691
692static struct platform_driver sirfsoc_dma_driver = {
693 .probe = sirfsoc_dma_probe,
694 .remove = __devexit_p(sirfsoc_dma_remove),
695 .driver = {
696 .name = DRV_NAME,
697 .owner = THIS_MODULE,
698 .of_match_table = sirfsoc_dma_match,
699 },
700};
701
702module_platform_driver(sirfsoc_dma_driver);
703
704MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
705 "Barry Song <baohua.song@csr.com>");
706MODULE_DESCRIPTION("SIRFSOC DMA control driver");
707MODULE_LICENSE("GPL v2");
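
For the interleaved (2D) path above: frame_size must be 1 on prima2, numf frames of sgl[0].size bytes are transferred with sgl[0].icg bytes of gap, and the driver derives xlen/width/ylen from those fields. A hedged sketch of a client driving it — the dma_interleaved_template layout is from my reading of <linux/dmaengine.h> of this era, everything else is made up:

	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int start_2d_xfer(struct dma_chan *chan, dma_addr_t buf,
				 size_t line_bytes, size_t gap_bytes, int lines)
	{
		struct dma_interleaved_template *xt;
		struct dma_async_tx_descriptor *desc;
		int ret = 0;

		/* one data_chunk, since prima2 allows one chunk per frame */
		xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
		if (!xt)
			return -ENOMEM;

		xt->src_start   = buf;
		xt->dir         = DMA_MEM_TO_DEV;
		xt->numf        = lines;	/* driver sets ylen = numf - 1 */
		xt->frame_size  = 1;
		xt->sgl[0].size = line_bytes;	/* driver converts to 4-byte words */
		xt->sgl[0].icg  = gap_bytes;	/* width = (size + icg) / 4 */

		desc = chan->device->device_prep_interleaved_dma(chan, xt, 0);
		if (IS_ERR_OR_NULL(desc)) {
			ret = desc ? PTR_ERR(desc) : -ENOMEM;
			goto out;
		}

		ret = dma_submit_error(dmaengine_submit(desc));
		dma_async_issue_pending(chan);
	out:
		kfree(xt);	/* prep has copied what it needs */
		return ret;
	}
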
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 13259cad0ce..cc5ecbc067a 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,8 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/clk.h> 15#include <linux/clk.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/pm.h>
18#include <linux/pm_runtime.h>
17#include <linux/err.h> 19#include <linux/err.h>
18#include <linux/amba/bus.h> 20#include <linux/amba/bus.h>
19 21
@@ -32,6 +34,9 @@
32/* Maximum iterations taken before giving up suspending a channel */ 34/* Maximum iterations taken before giving up suspending a channel */
33#define D40_SUSPEND_MAX_IT 500 35#define D40_SUSPEND_MAX_IT 500
34 36
37/* Milliseconds */
38#define DMA40_AUTOSUSPEND_DELAY 100
39
35/* Hardware requirement on LCLA alignment */ 40/* Hardware requirement on LCLA alignment */
36#define LCLA_ALIGNMENT 0x40000 41#define LCLA_ALIGNMENT 0x40000
37 42
@@ -62,6 +67,55 @@ enum d40_command {
62 D40_DMA_SUSPENDED = 3 67 D40_DMA_SUSPENDED = 3
63}; 68};
64 69
70/*
71 * These are the registers that have to be saved and later restored
72 * when the DMA hw is powered off.
73 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
74 */
75static u32 d40_backup_regs[] = {
76 D40_DREG_LCPA,
77 D40_DREG_LCLA,
78 D40_DREG_PRMSE,
79 D40_DREG_PRMSO,
80 D40_DREG_PRMOE,
81 D40_DREG_PRMOO,
82};
83
84#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
85
86/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
87static u32 d40_backup_regs_v3[] = {
88 D40_DREG_PSEG1,
89 D40_DREG_PSEG2,
90 D40_DREG_PSEG3,
91 D40_DREG_PSEG4,
92 D40_DREG_PCEG1,
93 D40_DREG_PCEG2,
94 D40_DREG_PCEG3,
95 D40_DREG_PCEG4,
96 D40_DREG_RSEG1,
97 D40_DREG_RSEG2,
98 D40_DREG_RSEG3,
99 D40_DREG_RSEG4,
100 D40_DREG_RCEG1,
101 D40_DREG_RCEG2,
102 D40_DREG_RCEG3,
103 D40_DREG_RCEG4,
104};
105
106#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
107
108static u32 d40_backup_regs_chan[] = {
109 D40_CHAN_REG_SSCFG,
110 D40_CHAN_REG_SSELT,
111 D40_CHAN_REG_SSPTR,
112 D40_CHAN_REG_SSLNK,
113 D40_CHAN_REG_SDCFG,
114 D40_CHAN_REG_SDELT,
115 D40_CHAN_REG_SDPTR,
116 D40_CHAN_REG_SDLNK,
117};
118
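
A quick size check, since the backing store for these per-channel saves is allocated later in this patch:

/*
 * Each non-reserved physical channel saves ARRAY_SIZE(d40_backup_regs_chan)
 * == 8 words, so the buffer kmalloc'ed in d40_hw_detect_init() below is
 *
 *	num_phy_chans * sizeof(d40_backup_regs_chan)
 *	== num_phy_chans * 8 * sizeof(u32) bytes,
 *
 * and channel i's slice starts at index i * 8, matching the idx
 * computation in d40_save_restore_registers().
 */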
65/** 119/**
66 * struct d40_lli_pool - Structure for keeping LLIs in memory 120 * struct d40_lli_pool - Structure for keeping LLIs in memory
67 * 121 *
@@ -96,7 +150,7 @@ struct d40_lli_pool {
96 * during a transfer. 150 * during a transfer.
97 * @node: List entry. 151 * @node: List entry.
98 * @is_in_client_list: true if the client owns this descriptor. 152 * @is_in_client_list: true if the client owns this descriptor.
99 * the previous one. 153 * @cyclic: true if this is a cyclic job
100 * 154 *
101 * This descriptor is used for both logical and physical transfers. 155 * This descriptor is used for both logical and physical transfers.
102 */ 156 */
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
143 * channels. 197 * channels.
144 * 198 *
145 * @lock: A lock protecting this entity. 199 * @lock: A lock protecting this entity.
200 * @reserved: True if used by secure world or otherwise.
146 * @num: The physical channel number of this entity. 201 * @num: The physical channel number of this entity.
147 * @allocated_src: Bit mapped to show which src event lines are mapped to 202 * @allocated_src: Bit mapped to show which src event lines are mapped to
148 * this physical channel. Can also be free or physically allocated. 203 * this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
152 */ 207 */
153struct d40_phy_res { 208struct d40_phy_res {
154 spinlock_t lock; 209 spinlock_t lock;
210 bool reserved;
155 int num; 211 int num;
156 u32 allocated_src; 212 u32 allocated_src;
157 u32 allocated_dst; 213 u32 allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
185 * @src_def_cfg: Default cfg register setting for src. 241 * @src_def_cfg: Default cfg register setting for src.
186 * @dst_def_cfg: Default cfg register setting for dst. 242 * @dst_def_cfg: Default cfg register setting for dst.
187 * @log_def: Default logical channel settings. 243 * @log_def: Default logical channel settings.
188 * @lcla: Space for one dst src pair for logical channel transfers.
189 * @lcpa: Pointer to dst and src lcpa settings. 244 * @lcpa: Pointer to dst and src lcpa settings.
190 * @runtime_addr: runtime configured address. 245 * @runtime_addr: runtime configured address.
191 * @runtime_direction: runtime configured direction. 246 * @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
217 struct d40_log_lli_full *lcpa; 272 struct d40_log_lli_full *lcpa;
218 /* Runtime reconfiguration */ 273 /* Runtime reconfiguration */
219 dma_addr_t runtime_addr; 274 dma_addr_t runtime_addr;
220 enum dma_data_direction runtime_direction; 275 enum dma_transfer_direction runtime_direction;
221}; 276};
222 277
223/** 278/**
@@ -241,6 +296,7 @@ struct d40_chan {
241 * @dma_both: dma_device channels that can do both memcpy and slave transfers. 296 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
242 * @dma_slave: dma_device channels that can only do slave transfers. 297 * @dma_slave: dma_device channels that can only do slave transfers.
243 * @dma_memcpy: dma_device channels that can only do memcpy transfers. 298 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
299 * @phy_chans: Room for all possible physical channels in system.
244 * @log_chans: Room for all possible logical channels in system. 300 * @log_chans: Room for all possible logical channels in system.
245 * @lookup_log_chans: Used to map interrupt number to logical channel. Points 301 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
246 * to log_chans entries. 302 * to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
248 * to phy_chans entries. 304 * to phy_chans entries.
249 * @plat_data: Pointer to provided platform_data which is the driver 305 * @plat_data: Pointer to provided platform_data which is the driver
250 * configuration. 306 * configuration.
307 * @lcpa_regulator: Pointer to the regulator for the esram bank used by lcla.
251 * @phy_res: Vector containing all physical channels. 308 * @phy_res: Vector containing all physical channels.
252 * @lcla_pool: lcla pool settings and data. 309 * @lcla_pool: lcla pool settings and data.
253 * @lcpa_base: The virtual mapped address of LCPA. 310 * @lcpa_base: The virtual mapped address of LCPA.
254 * @phy_lcpa: The physical address of the LCPA. 311 * @phy_lcpa: The physical address of the LCPA.
255 * @lcpa_size: The size of the LCPA area. 312 * @lcpa_size: The size of the LCPA area.
256 * @desc_slab: cache for descriptors. 313 * @desc_slab: cache for descriptors.
314 * @reg_val_backup: Here the values of some hardware registers are stored
315 * before the DMA is powered off. They are restored when the power is back on.
316 * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
317 * later.
318 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
319 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
320 * @initialized: true if the dma has been initialized
257 */ 321 */
258struct d40_base { 322struct d40_base {
259 spinlock_t interrupt_lock; 323 spinlock_t interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
275 struct d40_chan **lookup_log_chans; 339 struct d40_chan **lookup_log_chans;
276 struct d40_chan **lookup_phy_chans; 340 struct d40_chan **lookup_phy_chans;
277 struct stedma40_platform_data *plat_data; 341 struct stedma40_platform_data *plat_data;
342 struct regulator *lcpa_regulator;
278 /* Physical half channels */ 343 /* Physical half channels */
279 struct d40_phy_res *phy_res; 344 struct d40_phy_res *phy_res;
280 struct d40_lcla_pool lcla_pool; 345 struct d40_lcla_pool lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
282 dma_addr_t phy_lcpa; 347 dma_addr_t phy_lcpa;
283 resource_size_t lcpa_size; 348 resource_size_t lcpa_size;
284 struct kmem_cache *desc_slab; 349 struct kmem_cache *desc_slab;
350 u32 reg_val_backup[BACKUP_REGS_SZ];
351 u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
352 u32 *reg_val_backup_chan;
353 u16 gcc_pwr_off_mask;
354 bool initialized;
285}; 355};
286 356
287/** 357/**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
479 struct d40_desc *d; 549 struct d40_desc *d;
480 struct d40_desc *_d; 550 struct d40_desc *_d;
481 551
482 list_for_each_entry_safe(d, _d, &d40c->client, node) 552 list_for_each_entry_safe(d, _d, &d40c->client, node) {
483 if (async_tx_test_ack(&d->txd)) { 553 if (async_tx_test_ack(&d->txd)) {
484 d40_desc_remove(d); 554 d40_desc_remove(d);
485 desc = d; 555 desc = d;
486 memset(desc, 0, sizeof(*desc)); 556 memset(desc, 0, sizeof(*desc));
487 break; 557 break;
488 } 558 }
559 }
489 } 560 }
490 561
491 if (!desc) 562 if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
536 bool cyclic = desc->cyclic; 607 bool cyclic = desc->cyclic;
537 int curr_lcla = -EINVAL; 608 int curr_lcla = -EINVAL;
538 int first_lcla = 0; 609 int first_lcla = 0;
610 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
539 bool linkback; 611 bool linkback;
540 612
541 /* 613 /*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
608 &lli->src[lli_current], 680 &lli->src[lli_current],
609 next_lcla, flags); 681 next_lcla, flags);
610 682
611 dma_sync_single_range_for_device(chan->base->dev, 683 /*
612 pool->dma_addr, lcla_offset, 684 * Cache maintenance is not needed if lcla is
613 2 * sizeof(struct d40_log_lli), 685 * mapped in esram
614 DMA_TO_DEVICE); 686 */
615 687 if (!use_esram_lcla) {
688 dma_sync_single_range_for_device(chan->base->dev,
689 pool->dma_addr, lcla_offset,
690 2 * sizeof(struct d40_log_lli),
691 DMA_TO_DEVICE);
692 }
616 curr_lcla = next_lcla; 693 curr_lcla = next_lcla;
617 694
618 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { 695 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
740 return len; 817 return len;
741} 818}
742 819
743/* Support functions for logical channels */ 820
821#ifdef CONFIG_PM
822static void dma40_backup(void __iomem *baseaddr, u32 *backup,
823 u32 *regaddr, int num, bool save)
824{
825 int i;
826
827 for (i = 0; i < num; i++) {
828 void __iomem *addr = baseaddr + regaddr[i];
829
830 if (save)
831 backup[i] = readl_relaxed(addr);
832 else
833 writel_relaxed(backup[i], addr);
834 }
835}
836
837static void d40_save_restore_registers(struct d40_base *base, bool save)
838{
839 int i;
840
841 /* Save/Restore channel specific registers */
842 for (i = 0; i < base->num_phy_chans; i++) {
843 void __iomem *addr;
844 int idx;
845
846 if (base->phy_res[i].reserved)
847 continue;
848
849 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
850 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
851
852 dma40_backup(addr, &base->reg_val_backup_chan[idx],
853 d40_backup_regs_chan,
854 ARRAY_SIZE(d40_backup_regs_chan),
855 save);
856 }
857
858 /* Save/Restore global registers */
859 dma40_backup(base->virtbase, base->reg_val_backup,
860 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
861 save);
862
863 /* Save/Restore registers only existing on dma40 v3 and later */
864 if (base->rev >= 3)
865 dma40_backup(base->virtbase, base->reg_val_backup_v3,
866 d40_backup_regs_v3,
867 ARRAY_SIZE(d40_backup_regs_v3),
868 save);
869}
870#else
871static void d40_save_restore_registers(struct d40_base *base, bool save)
872{
873}
874#endif
744 875
745static int d40_channel_execute_command(struct d40_chan *d40c, 876static int d40_channel_execute_command(struct d40_chan *d40c,
746 enum d40_command command) 877 enum d40_command command)
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
973 /* Set LIDX for lcla */ 1104 /* Set LIDX for lcla */
974 writel(lidx, chanbase + D40_CHAN_REG_SSELT); 1105 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
975 writel(lidx, chanbase + D40_CHAN_REG_SDELT); 1106 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1107
1108 /* Clear LNK which will be used by d40_chan_has_events() */
1109 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1110 writel(0, chanbase + D40_CHAN_REG_SDLNK);
976 } 1111 }
977} 1112}
978 1113
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
1013 if (!d40c->busy) 1148 if (!d40c->busy)
1014 return 0; 1149 return 0;
1015 1150
1151 pm_runtime_get_sync(d40c->base->dev);
1016 spin_lock_irqsave(&d40c->lock, flags); 1152 spin_lock_irqsave(&d40c->lock, flags);
1017 1153
1018 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1154 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
1025 D40_DMA_RUN); 1161 D40_DMA_RUN);
1026 } 1162 }
1027 } 1163 }
1028 1164 pm_runtime_mark_last_busy(d40c->base->dev);
1165 pm_runtime_put_autosuspend(d40c->base->dev);
1029 spin_unlock_irqrestore(&d40c->lock, flags); 1166 spin_unlock_irqrestore(&d40c->lock, flags);
1030 return res; 1167 return res;
1031} 1168}
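
The runtime PM calls added to d40_pause() (and to the paths below) follow one fixed bracket around every hardware access; a minimal sketch of the pattern, where do_hw_access() is a hypothetical stand-in:

static int my_hw_op(struct d40_chan *d40c)
{
	int res;

	pm_runtime_get_sync(d40c->base->dev);	/* power the block up, or keep it up */
	res = do_hw_access(d40c);		/* hypothetical register work */
	pm_runtime_mark_last_busy(d40c->base->dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(d40c->base->dev);	/* drop ref; suspend after DMA40_AUTOSUSPEND_DELAY */
	return res;
}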
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
1039 return 0; 1176 return 0;
1040 1177
1041 spin_lock_irqsave(&d40c->lock, flags); 1178 spin_lock_irqsave(&d40c->lock, flags);
1042 1179 pm_runtime_get_sync(d40c->base->dev);
1043 if (d40c->base->rev == 0) 1180 if (d40c->base->rev == 0)
1044 if (chan_is_logical(d40c)) { 1181 if (chan_is_logical(d40c)) {
1045 res = d40_channel_execute_command(d40c, 1182 res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
1057 } 1194 }
1058 1195
1059no_suspend: 1196no_suspend:
1197 pm_runtime_mark_last_busy(d40c->base->dev);
1198 pm_runtime_put_autosuspend(d40c->base->dev);
1060 spin_unlock_irqrestore(&d40c->lock, flags); 1199 spin_unlock_irqrestore(&d40c->lock, flags);
1061 return res; 1200 return res;
1062} 1201}
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1129 d40d = d40_first_queued(d40c); 1268 d40d = d40_first_queued(d40c);
1130 1269
1131 if (d40d != NULL) { 1270 if (d40d != NULL) {
1132 d40c->busy = true; 1271 if (!d40c->busy)
1272 d40c->busy = true;
1273
1274 pm_runtime_get_sync(d40c->base->dev);
1133 1275
1134 /* Remove from queue */ 1276 /* Remove from queue */
1135 d40_desc_remove(d40d); 1277 d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
1190 1332
1191 if (d40_queue_start(d40c) == NULL) 1333 if (d40_queue_start(d40c) == NULL)
1192 d40c->busy = false; 1334 d40c->busy = false;
1335 pm_runtime_mark_last_busy(d40c->base->dev);
1336 pm_runtime_put_autosuspend(d40c->base->dev);
1193 } 1337 }
1194 1338
1195 d40c->pending_tx++; 1339 d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
1405 return res; 1549 return res;
1406} 1550}
1407 1551
1408static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, 1552static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1409 int log_event_line, bool is_log) 1553 bool is_src, int log_event_line, bool is_log,
1554 bool *first_user)
1410{ 1555{
1411 unsigned long flags; 1556 unsigned long flags;
1412 spin_lock_irqsave(&phy->lock, flags); 1557 spin_lock_irqsave(&phy->lock, flags);
1558
1559 *first_user = ((phy->allocated_src | phy->allocated_dst)
1560 == D40_ALLOC_FREE);
1561
1413 if (!is_log) { 1562 if (!is_log) {
1414 /* Physical interrupts are masked per physical full channel */ 1563 /* Physical interrupts are masked per physical full channel */
1415 if (phy->allocated_src == D40_ALLOC_FREE && 1564 if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ out:
1490 return is_free; 1639 return is_free;
1491} 1640}
1492 1641
1493static int d40_allocate_channel(struct d40_chan *d40c) 1642static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1494{ 1643{
1495 int dev_type; 1644 int dev_type;
1496 int event_group; 1645 int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
1526 for (i = 0; i < d40c->base->num_phy_chans; i++) { 1675 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1527 1676
1528 if (d40_alloc_mask_set(&phys[i], is_src, 1677 if (d40_alloc_mask_set(&phys[i], is_src,
1529 0, is_log)) 1678 0, is_log,
1679 first_phy_user))
1530 goto found_phy; 1680 goto found_phy;
1531 } 1681 }
1532 } else 1682 } else
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
1536 if (d40_alloc_mask_set(&phys[i], 1686 if (d40_alloc_mask_set(&phys[i],
1537 is_src, 1687 is_src,
1538 0, 1688 0,
1539 is_log)) 1689 is_log,
1690 first_phy_user))
1540 goto found_phy; 1691 goto found_phy;
1541 } 1692 }
1542 } 1693 }
@@ -1552,6 +1703,25 @@ found_phy:
1552 /* Find logical channel */ 1703 /* Find logical channel */
1553 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1704 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1554 int phy_num = j + event_group * 2; 1705 int phy_num = j + event_group * 2;
1706
1707 if (d40c->dma_cfg.use_fixed_channel) {
1708 i = d40c->dma_cfg.phy_channel;
1709
1710 if ((i != phy_num) && (i != phy_num + 1)) {
1711 dev_err(chan2dev(d40c),
1712 "invalid fixed phy channel %d\n", i);
1713 return -EINVAL;
1714 }
1715
1716 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1717 is_log, first_phy_user))
1718 goto found_log;
1719
1720 dev_err(chan2dev(d40c),
1721 "could not allocate fixed phy channel %d\n", i);
1722 return -EINVAL;
1723 }
1724
1555 /* 1725 /*
1556 * Spread logical channels across all available physical rather 1726 * Spread logical channels across all available physical rather
1557 * than pack every logical channel at the first available phy 1727 * than pack every logical channel at the first available phy
@@ -1560,13 +1730,15 @@ found_phy:
1560 if (is_src) { 1730 if (is_src) {
1561 for (i = phy_num; i < phy_num + 2; i++) { 1731 for (i = phy_num; i < phy_num + 2; i++) {
1562 if (d40_alloc_mask_set(&phys[i], is_src, 1732 if (d40_alloc_mask_set(&phys[i], is_src,
1563 event_line, is_log)) 1733 event_line, is_log,
1734 first_phy_user))
1564 goto found_log; 1735 goto found_log;
1565 } 1736 }
1566 } else { 1737 } else {
1567 for (i = phy_num + 1; i >= phy_num; i--) { 1738 for (i = phy_num + 1; i >= phy_num; i--) {
1568 if (d40_alloc_mask_set(&phys[i], is_src, 1739 if (d40_alloc_mask_set(&phys[i], is_src,
1569 event_line, is_log)) 1740 event_line, is_log,
1741 first_phy_user))
1570 goto found_log; 1742 goto found_log;
1571 } 1743 }
1572 } 1744 }
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
1643 return -EINVAL; 1815 return -EINVAL;
1644 } 1816 }
1645 1817
1818 pm_runtime_get_sync(d40c->base->dev);
1646 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1819 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1647 if (res) { 1820 if (res) {
1648 chan_err(d40c, "suspend failed\n"); 1821 chan_err(d40c, "suspend failed\n");
1649 return res; 1822 goto out;
1650 } 1823 }
1651 1824
1652 if (chan_is_logical(d40c)) { 1825 if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
1664 if (d40_chan_has_events(d40c)) { 1837 if (d40_chan_has_events(d40c)) {
1665 res = d40_channel_execute_command(d40c, 1838 res = d40_channel_execute_command(d40c,
1666 D40_DMA_RUN); 1839 D40_DMA_RUN);
1667 if (res) { 1840 if (res)
1668 chan_err(d40c, 1841 chan_err(d40c,
1669 "Executing RUN command\n"); 1842 "Executing RUN command\n");
1670 return res;
1671 }
1672 } 1843 }
1673 return 0; 1844 goto out;
1674 } 1845 }
1675 } else { 1846 } else {
1676 (void) d40_alloc_mask_free(phy, is_src, 0); 1847 (void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
1680 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 1851 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1681 if (res) { 1852 if (res) {
1682 chan_err(d40c, "Failed to stop channel\n"); 1853 chan_err(d40c, "Failed to stop channel\n");
1683 return res; 1854 goto out;
1684 } 1855 }
1856
1857 if (d40c->busy) {
1858 pm_runtime_mark_last_busy(d40c->base->dev);
1859 pm_runtime_put_autosuspend(d40c->base->dev);
1860 }
1861
1862 d40c->busy = false;
1685 d40c->phy_chan = NULL; 1863 d40c->phy_chan = NULL;
1686 d40c->configured = false; 1864 d40c->configured = false;
1687 d40c->base->lookup_phy_chans[phy->num] = NULL; 1865 d40c->base->lookup_phy_chans[phy->num] = NULL;
1866out:
1688 1867
1689 return 0; 1868 pm_runtime_mark_last_busy(d40c->base->dev);
1869 pm_runtime_put_autosuspend(d40c->base->dev);
1870 return res;
1690} 1871}
1691 1872
1692static bool d40_is_paused(struct d40_chan *d40c) 1873static bool d40_is_paused(struct d40_chan *d40c)
@@ -1855,7 +2036,7 @@ err:
1855} 2036}
1856 2037
1857static dma_addr_t 2038static dma_addr_t
1858d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) 2039d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
1859{ 2040{
1860 struct stedma40_platform_data *plat = chan->base->plat_data; 2041 struct stedma40_platform_data *plat = chan->base->plat_data;
1861 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2042 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
1864 if (chan->runtime_addr) 2045 if (chan->runtime_addr)
1865 return chan->runtime_addr; 2046 return chan->runtime_addr;
1866 2047
1867 if (direction == DMA_FROM_DEVICE) 2048 if (direction == DMA_DEV_TO_MEM)
1868 addr = plat->dev_rx[cfg->src_dev_type]; 2049 addr = plat->dev_rx[cfg->src_dev_type];
1869 else if (direction == DMA_TO_DEVICE) 2050 else if (direction == DMA_MEM_TO_DEV)
1870 addr = plat->dev_tx[cfg->dst_dev_type]; 2051 addr = plat->dev_tx[cfg->dst_dev_type];
1871 2052
1872 return addr; 2053 return addr;
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
1875static struct dma_async_tx_descriptor * 2056static struct dma_async_tx_descriptor *
1876d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, 2057d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1877 struct scatterlist *sg_dst, unsigned int sg_len, 2058 struct scatterlist *sg_dst, unsigned int sg_len,
1878 enum dma_data_direction direction, unsigned long dma_flags) 2059 enum dma_transfer_direction direction, unsigned long dma_flags)
1879{ 2060{
1880 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); 2061 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
1881 dma_addr_t src_dev_addr = 0; 2062 dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1902 if (direction != DMA_NONE) { 2083 if (direction != DMA_NONE) {
1903 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); 2084 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
1904 2085
1905 if (direction == DMA_FROM_DEVICE) 2086 if (direction == DMA_DEV_TO_MEM)
1906 src_dev_addr = dev_addr; 2087 src_dev_addr = dev_addr;
1907 else if (direction == DMA_TO_DEVICE) 2088 else if (direction == DMA_MEM_TO_DEV)
1908 dst_dev_addr = dev_addr; 2089 dst_dev_addr = dev_addr;
1909 } 2090 }
1910 2091
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
2011 goto fail; 2192 goto fail;
2012 } 2193 }
2013 } 2194 }
2014 is_free_phy = (d40c->phy_chan == NULL);
2015 2195
2016 err = d40_allocate_channel(d40c); 2196 err = d40_allocate_channel(d40c, &is_free_phy);
2017 if (err) { 2197 if (err) {
2018 chan_err(d40c, "Failed to allocate channel\n"); 2198 chan_err(d40c, "Failed to allocate channel\n");
2199 d40c->configured = false;
2019 goto fail; 2200 goto fail;
2020 } 2201 }
2021 2202
2203 pm_runtime_get_sync(d40c->base->dev);
2022 /* Fill in basic CFG register values */ 2204 /* Fill in basic CFG register values */
2023 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, 2205 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
2024 &d40c->dst_def_cfg, chan_is_logical(d40c)); 2206 &d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
2038 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; 2220 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2039 } 2221 }
2040 2222
2223 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2224 chan_is_logical(d40c) ? "logical" : "physical",
2225 d40c->phy_chan->num,
2226 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2227
2228
2041 /* 2229 /*
2042 * Only write channel configuration to the DMA if the physical 2230 * Only write channel configuration to the DMA if the physical
2043 * resource is free. In case of multiple logical channels 2231 * resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
2046 if (is_free_phy) 2234 if (is_free_phy)
2047 d40_config_write(d40c); 2235 d40_config_write(d40c);
2048fail: 2236fail:
2237 pm_runtime_mark_last_busy(d40c->base->dev);
2238 pm_runtime_put_autosuspend(d40c->base->dev);
2049 spin_unlock_irqrestore(&d40c->lock, flags); 2239 spin_unlock_irqrestore(&d40c->lock, flags);
2050 return err; 2240 return err;
2051} 2241}
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
2108static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, 2298static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2109 struct scatterlist *sgl, 2299 struct scatterlist *sgl,
2110 unsigned int sg_len, 2300 unsigned int sg_len,
2111 enum dma_data_direction direction, 2301 enum dma_transfer_direction direction,
2112 unsigned long dma_flags) 2302 unsigned long dma_flags)
2113{ 2303{
2114 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) 2304 if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
2115 return NULL; 2305 return NULL;
2116 2306
2117 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); 2307 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2120static struct dma_async_tx_descriptor * 2310static struct dma_async_tx_descriptor *
2121dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 2311dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2122 size_t buf_len, size_t period_len, 2312 size_t buf_len, size_t period_len,
2123 enum dma_data_direction direction) 2313 enum dma_transfer_direction direction)
2124{ 2314{
2125 unsigned int periods = buf_len / period_len; 2315 unsigned int periods = buf_len / period_len;
2126 struct dma_async_tx_descriptor *txd; 2316 struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2269 dst_addr_width = config->dst_addr_width; 2459 dst_addr_width = config->dst_addr_width;
2270 dst_maxburst = config->dst_maxburst; 2460 dst_maxburst = config->dst_maxburst;
2271 2461
2272 if (config->direction == DMA_FROM_DEVICE) { 2462 if (config->direction == DMA_DEV_TO_MEM) {
2273 dma_addr_t dev_addr_rx = 2463 dma_addr_t dev_addr_rx =
2274 d40c->base->plat_data->dev_rx[cfg->src_dev_type]; 2464 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2275 2465
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2292 if (dst_maxburst == 0) 2482 if (dst_maxburst == 0)
2293 dst_maxburst = src_maxburst; 2483 dst_maxburst = src_maxburst;
2294 2484
2295 } else if (config->direction == DMA_TO_DEVICE) { 2485 } else if (config->direction == DMA_MEM_TO_DEV) {
2296 dma_addr_t dev_addr_tx = 2486 dma_addr_t dev_addr_tx =
2297 d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; 2487 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2298 2488
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2357 "configured channel %s for %s, data width %d/%d, " 2547 "configured channel %s for %s, data width %d/%d, "
2358 "maxburst %d/%d elements, LE, no flow control\n", 2548 "maxburst %d/%d elements, LE, no flow control\n",
2359 dma_chan_name(chan), 2549 dma_chan_name(chan),
2360 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 2550 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2361 src_addr_width, dst_addr_width, 2551 src_addr_width, dst_addr_width,
2362 src_maxburst, dst_maxburst); 2552 src_maxburst, dst_maxburst);
2363 2553
@@ -2519,6 +2709,72 @@ failure1:
2519 return err; 2709 return err;
2520} 2710}
2521 2711
2712/* Suspend resume functionality */
2713#ifdef CONFIG_PM
2714static int dma40_pm_suspend(struct device *dev)
2715{
2716 struct platform_device *pdev = to_platform_device(dev);
2717 struct d40_base *base = platform_get_drvdata(pdev);
2718 int ret = 0;
2719 if (!pm_runtime_suspended(dev))
2720 return -EBUSY;
2721
2722 if (base->lcpa_regulator)
2723 ret = regulator_disable(base->lcpa_regulator);
2724 return ret;
2725}
2726
2727static int dma40_runtime_suspend(struct device *dev)
2728{
2729 struct platform_device *pdev = to_platform_device(dev);
2730 struct d40_base *base = platform_get_drvdata(pdev);
2731
2732 d40_save_restore_registers(base, true);
2733
2734 /* Don't disable/enable clocks for v1 due to HW bugs */
2735 if (base->rev != 1)
2736 writel_relaxed(base->gcc_pwr_off_mask,
2737 base->virtbase + D40_DREG_GCC);
2738
2739 return 0;
2740}
2741
2742static int dma40_runtime_resume(struct device *dev)
2743{
2744 struct platform_device *pdev = to_platform_device(dev);
2745 struct d40_base *base = platform_get_drvdata(pdev);
2746
2747 if (base->initialized)
2748 d40_save_restore_registers(base, false);
2749
2750 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
2751 base->virtbase + D40_DREG_GCC);
2752 return 0;
2753}
2754
2755static int dma40_resume(struct device *dev)
2756{
2757 struct platform_device *pdev = to_platform_device(dev);
2758 struct d40_base *base = platform_get_drvdata(pdev);
2759 int ret = 0;
2760
2761 if (base->lcpa_regulator)
2762 ret = regulator_enable(base->lcpa_regulator);
2763
2764 return ret;
2765}
2766
2767static const struct dev_pm_ops dma40_pm_ops = {
2768 .suspend = dma40_pm_suspend,
2769 .runtime_suspend = dma40_runtime_suspend,
2770 .runtime_resume = dma40_runtime_resume,
2771 .resume = dma40_resume,
2772};
2773#define DMA40_PM_OPS (&dma40_pm_ops)
2774#else
2775#define DMA40_PM_OPS NULL
2776#endif
2777
2522/* Initialization functions. */ 2778/* Initialization functions. */
2523 2779
2524static int __init d40_phy_res_init(struct d40_base *base) 2780static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
2527 int num_phy_chans_avail = 0; 2783 int num_phy_chans_avail = 0;
2528 u32 val[2]; 2784 u32 val[2];
2529 int odd_even_bit = -2; 2785 int odd_even_bit = -2;
2786 int gcc = D40_DREG_GCC_ENA;
2530 2787
2531 val[0] = readl(base->virtbase + D40_DREG_PRSME); 2788 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2532 val[1] = readl(base->virtbase + D40_DREG_PRSMO); 2789 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
2538 /* Mark security only channels as occupied */ 2795 /* Mark security only channels as occupied */
2539 base->phy_res[i].allocated_src = D40_ALLOC_PHY; 2796 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2540 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 2797 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2798 base->phy_res[i].reserved = true;
2799 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
2800 D40_DREG_GCC_SRC);
2801 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
2802 D40_DREG_GCC_DST);
2803
2804
2541 } else { 2805 } else {
2542 base->phy_res[i].allocated_src = D40_ALLOC_FREE; 2806 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2543 base->phy_res[i].allocated_dst = D40_ALLOC_FREE; 2807 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2808 base->phy_res[i].reserved = false;
2544 num_phy_chans_avail++; 2809 num_phy_chans_avail++;
2545 } 2810 }
2546 spin_lock_init(&base->phy_res[i].lock); 2811 spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
2552 2817
2553 base->phy_res[chan].allocated_src = D40_ALLOC_PHY; 2818 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2554 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; 2819 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2820 base->phy_res[chan].reserved = true;
2821 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
2822 D40_DREG_GCC_SRC);
2823 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
2824 D40_DREG_GCC_DST);
2555 num_phy_chans_avail--; 2825 num_phy_chans_avail--;
2556 } 2826 }
2557 2827
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
2572 val[0] = val[0] >> 2; 2842 val[0] = val[0] >> 2;
2573 } 2843 }
2574 2844
2845 /*
2846 * To keep things simple, enable all clocks initially.
2847 * The clocks will be managed later, after channel allocation.
2848 * The clocks for the event lines on which reserved channels exist
2849 * are not managed here.
2850 */
2851 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
2852 base->gcc_pwr_off_mask = gcc;
2853
2575 return num_phy_chans_avail; 2854 return num_phy_chans_avail;
2576} 2855}
2577 2856
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2699 goto failure; 2978 goto failure;
2700 } 2979 }
2701 2980
2702 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * 2981 base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
2703 sizeof(struct d40_desc *) * 2982 sizeof(d40_backup_regs_chan),
2704 D40_LCLA_LINK_PER_EVENT_GRP,
2705 GFP_KERNEL); 2983 GFP_KERNEL);
2984 if (!base->reg_val_backup_chan)
2985 goto failure;
2986
2987 base->lcla_pool.alloc_map =
2988 kzalloc(num_phy_chans * sizeof(struct d40_desc *)
2989 * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
2706 if (!base->lcla_pool.alloc_map) 2990 if (!base->lcla_pool.alloc_map)
2707 goto failure; 2991 goto failure;
2708 2992
@@ -2741,9 +3025,9 @@ failure:
2741static void __init d40_hw_init(struct d40_base *base) 3025static void __init d40_hw_init(struct d40_base *base)
2742{ 3026{
2743 3027
2744 static const struct d40_reg_val dma_init_reg[] = { 3028 static struct d40_reg_val dma_init_reg[] = {
2745 /* Clock every part of the DMA block from start */ 3029 /* Clock every part of the DMA block from start */
2746 { .reg = D40_DREG_GCC, .val = 0x0000ff01}, 3030 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
2747 3031
2748 /* Interrupts on all logical channels */ 3032 /* Interrupts on all logical channels */
2749 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, 3033 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
2943 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); 3227 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
2944 goto failure; 3228 goto failure;
2945 } 3229 }
3230 /* If lcla has to be located in ESRAM we don't need to allocate */
3231 if (base->plat_data->use_esram_lcla) {
3232 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3233 "lcla_esram");
3234 if (!res) {
3235 ret = -ENOENT;
3236 d40_err(&pdev->dev,
3237 "No \"lcla_esram\" memory resource\n");
3238 goto failure;
3239 }
3240 base->lcla_pool.base = ioremap(res->start,
3241 resource_size(res));
3242 if (!base->lcla_pool.base) {
3243 ret = -ENOMEM;
3244 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3245 goto failure;
3246 }
3247 writel(res->start, base->virtbase + D40_DREG_LCLA);
2946 3248
2947 ret = d40_lcla_allocate(base); 3249 } else {
2948 if (ret) { 3250 ret = d40_lcla_allocate(base);
2949 d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); 3251 if (ret) {
2950 goto failure; 3252 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3253 goto failure;
3254 }
2951 } 3255 }
2952 3256
2953 spin_lock_init(&base->lcla_pool.lock); 3257 spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
2960 goto failure; 3264 goto failure;
2961 } 3265 }
2962 3266
3267 pm_runtime_irq_safe(base->dev);
3268 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3269 pm_runtime_use_autosuspend(base->dev);
3270 pm_runtime_enable(base->dev);
3271 pm_runtime_resume(base->dev);
3272
3273 if (base->plat_data->use_esram_lcla) {
3274
3275 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3276 if (IS_ERR(base->lcpa_regulator)) {
3277 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3278 base->lcpa_regulator = NULL;
3279 goto failure;
3280 }
3281
3282 ret = regulator_enable(base->lcpa_regulator);
3283 if (ret) {
3284 d40_err(&pdev->dev,
3285 "Failed to enable lcpa_regulator\n");
3286 regulator_put(base->lcpa_regulator);
3287 base->lcpa_regulator = NULL;
3288 goto failure;
3289 }
3290 }
3291
3292 base->initialized = true;
2963 err = d40_dmaengine_init(base, num_reserved_chans); 3293 err = d40_dmaengine_init(base, num_reserved_chans);
2964 if (err) 3294 if (err)
2965 goto failure; 3295 goto failure;
@@ -2976,6 +3306,11 @@ failure:
2976 if (base->virtbase) 3306 if (base->virtbase)
2977 iounmap(base->virtbase); 3307 iounmap(base->virtbase);
2978 3308
3309 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3310 iounmap(base->lcla_pool.base);
3311 base->lcla_pool.base = NULL;
3312 }
3313
2979 if (base->lcla_pool.dma_addr) 3314 if (base->lcla_pool.dma_addr)
2980 dma_unmap_single(base->dev, base->lcla_pool.dma_addr, 3315 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
2981 SZ_1K * base->num_phy_chans, 3316 SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ failure:
2998 clk_put(base->clk); 3333 clk_put(base->clk);
2999 } 3334 }
3000 3335
3336 if (base->lcpa_regulator) {
3337 regulator_disable(base->lcpa_regulator);
3338 regulator_put(base->lcpa_regulator);
3339 }
3340
3001 kfree(base->lcla_pool.alloc_map); 3341 kfree(base->lcla_pool.alloc_map);
3002 kfree(base->lookup_log_chans); 3342 kfree(base->lookup_log_chans);
3003 kfree(base->lookup_phy_chans); 3343 kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
3013 .driver = { 3353 .driver = {
3014 .owner = THIS_MODULE, 3354 .owner = THIS_MODULE,
3015 .name = D40_NAME, 3355 .name = D40_NAME,
3356 .pm = DMA40_PM_OPS,
3016 }, 3357 },
3017}; 3358};
3018 3359
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index b44c455158d..8d3d490968a 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -16,6 +16,8 @@
16 16
17#define D40_TYPE_TO_GROUP(type) (type / 16) 17#define D40_TYPE_TO_GROUP(type) (type / 16)
18#define D40_TYPE_TO_EVENT(type) (type % 16) 18#define D40_TYPE_TO_EVENT(type) (type % 16)
19#define D40_GROUP_SIZE 8
20#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
19 21
20/* Most bits of the CFG register are the same in log as in phy mode */ 22/* Most bits of the CFG register are the same in log as in phy mode */
21#define D40_SREG_CFG_MST_POS 15 23#define D40_SREG_CFG_MST_POS 15
@@ -123,6 +125,15 @@
123 125
124/* DMA Register Offsets */ 126/* DMA Register Offsets */
125#define D40_DREG_GCC 0x000 127#define D40_DREG_GCC 0x000
128#define D40_DREG_GCC_ENA 0x1
129/* This assumes that there are only 4 event groups */
130#define D40_DREG_GCC_ENABLE_ALL 0xff01
131#define D40_DREG_GCC_EVTGRP_POS 8
132#define D40_DREG_GCC_SRC 0
133#define D40_DREG_GCC_DST 1
134#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
135 (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
136
126#define D40_DREG_PRTYP 0x004 137#define D40_DREG_PRTYP 0x004
127#define D40_DREG_PRSME 0x008 138#define D40_DREG_PRSME 0x008
128#define D40_DREG_PRSMO 0x00C 139#define D40_DREG_PRSMO 0x00C
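
A worked example of the bit layout these macros encode, following directly from the definitions above:

/*
 * D40_DREG_GCC_EVTGRP_ENA(x, y) sets bit 8 + 2*x + y, where y selects
 * src (0) or dst (1). For physical channel 6:
 *
 *	D40_PHYS_TO_GROUP(6) == (6 & 7) / 2 == 3
 *	D40_DREG_GCC_EVTGRP_ENA(3, D40_DREG_GCC_SRC) == 1 << 14 == 0x4000
 *	D40_DREG_GCC_EVTGRP_ENA(3, D40_DREG_GCC_DST) == 1 << 15 == 0x8000
 *
 * D40_DREG_GCC_ENABLE_ALL (0xff01) is all eight group bits (8..15) plus
 * the global D40_DREG_GCC_ENA bit 0.
 */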
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a4a398f2ef6..a6f9c1684a0 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -90,7 +90,7 @@ struct timb_dma_chan {
90 struct list_head queue; 90 struct list_head queue;
91 struct list_head free_list; 91 struct list_head free_list;
92 unsigned int bytes_per_line; 92 unsigned int bytes_per_line;
93 enum dma_data_direction direction; 93 enum dma_transfer_direction direction;
94 unsigned int descs; /* Descriptors to allocate */ 94 unsigned int descs; /* Descriptors to allocate */
95 unsigned int desc_elems; /* number of elems per descriptor */ 95 unsigned int desc_elems; /* number of elems per descriptor */
96}; 96};
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
166 166
167 if (single) 167 if (single)
168 dma_unmap_single(chan2dev(&td_chan->chan), addr, len, 168 dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
169 td_chan->direction); 169 DMA_TO_DEVICE);
170 else 170 else
171 dma_unmap_page(chan2dev(&td_chan->chan), addr, len, 171 dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
172 td_chan->direction); 172 DMA_TO_DEVICE);
173} 173}
174 174
175static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) 175static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
235 "td_chan: %p, chan: %d, membase: %p\n", 235 "td_chan: %p, chan: %d, membase: %p\n",
236 td_chan, td_chan->chan.chan_id, td_chan->membase); 236 td_chan, td_chan->chan.chan_id, td_chan->membase);
237 237
238 if (td_chan->direction == DMA_FROM_DEVICE) { 238 if (td_chan->direction == DMA_DEV_TO_MEM) {
239 239
240 /* descriptor address */ 240 /* descriptor address */
241 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); 241 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
278 txd->cookie); 278 txd->cookie);
279 279
280 /* make sure to stop the transfer */ 280 /* make sure to stop the transfer */
281 if (td_chan->direction == DMA_FROM_DEVICE) 281 if (td_chan->direction == DMA_DEV_TO_MEM)
282 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); 282 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
283/* Currently no support for stopping DMA transfers 283/* Currently no support for stopping DMA transfers
284 else 284 else
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
558 558
559static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, 559static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
560 struct scatterlist *sgl, unsigned int sg_len, 560 struct scatterlist *sgl, unsigned int sg_len,
561 enum dma_data_direction direction, unsigned long flags) 561 enum dma_transfer_direction direction, unsigned long flags)
562{ 562{
563 struct timb_dma_chan *td_chan = 563 struct timb_dma_chan *td_chan =
564 container_of(chan, struct timb_dma_chan, chan); 564 container_of(chan, struct timb_dma_chan, chan);
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
606 } 606 }
607 607
608 dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, 608 dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
609 td_desc->desc_list_len, DMA_TO_DEVICE); 609 td_desc->desc_list_len, DMA_MEM_TO_DEV);
610 610
611 return &td_desc->txd; 611 return &td_desc->txd;
612} 612}
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
775 td_chan->descs = pchan->descriptors; 775 td_chan->descs = pchan->descriptors;
776 td_chan->desc_elems = pchan->descriptor_elements; 776 td_chan->desc_elems = pchan->descriptor_elements;
777 td_chan->bytes_per_line = pchan->bytes_per_line; 777 td_chan->bytes_per_line = pchan->bytes_per_line;
778 td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : 778 td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
779 DMA_TO_DEVICE; 779 DMA_MEM_TO_DEV;
780 780
781 td_chan->membase = td->membase + 781 td_chan->membase = td->membase +
782 (i / 2) * TIMBDMA_INSTANCE_OFFSET + 782 (i / 2) * TIMBDMA_INSTANCE_OFFSET +
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = {
841 .remove = __exit_p(td_remove), 841 .remove = __exit_p(td_remove),
842}; 842};
843 843
844static int __init td_init(void) 844module_platform_driver(td_driver);
845{
846 return platform_driver_register(&td_driver);
847}
848module_init(td_init);
849
850static void __exit td_exit(void)
851{
852 platform_driver_unregister(&td_driver);
853}
854module_exit(td_exit);
855 845
856MODULE_LICENSE("GPL v2"); 846MODULE_LICENSE("GPL v2");
857MODULE_DESCRIPTION("Timberdale DMA controller driver"); 847MODULE_DESCRIPTION("Timberdale DMA controller driver");
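
For reference, module_platform_driver() generates roughly the boilerplate deleted above (a sketch of the macro's effect, not its exact expansion):

/*
 *	static int __init td_driver_init(void)
 *	{
 *		return platform_driver_register(&td_driver);
 *	}
 *	module_init(td_driver_init);
 *
 *	static void __exit td_driver_exit(void)
 *	{
 *		platform_driver_unregister(&td_driver);
 *	}
 *	module_exit(td_driver_exit);
 */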
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index cbd83e362b5..6122c364cf1 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
845 845
846static struct dma_async_tx_descriptor * 846static struct dma_async_tx_descriptor *
847txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 847txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
848 unsigned int sg_len, enum dma_data_direction direction, 848 unsigned int sg_len, enum dma_transfer_direction direction,
849 unsigned long flags) 849 unsigned long flags)
850{ 850{
851 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 851 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
860 860
861 BUG_ON(!ds || !ds->reg_width); 861 BUG_ON(!ds || !ds->reg_width);
862 if (ds->tx_reg) 862 if (ds->tx_reg)
863 BUG_ON(direction != DMA_TO_DEVICE); 863 BUG_ON(direction != DMA_MEM_TO_DEV);
864 else 864 else
865 BUG_ON(direction != DMA_FROM_DEVICE); 865 BUG_ON(direction != DMA_DEV_TO_MEM);
866 if (unlikely(!sg_len)) 866 if (unlikely(!sg_len))
867 return NULL; 867 return NULL;
868 868
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
882 mem = sg_dma_address(sg); 882 mem = sg_dma_address(sg);
883 883
884 if (__is_dmac64(ddev)) { 884 if (__is_dmac64(ddev)) {
885 if (direction == DMA_TO_DEVICE) { 885 if (direction == DMA_MEM_TO_DEV) {
886 desc->hwdesc.SAR = mem; 886 desc->hwdesc.SAR = mem;
887 desc->hwdesc.DAR = ds->tx_reg; 887 desc->hwdesc.DAR = ds->tx_reg;
888 } else { 888 } else {
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
891 } 891 }
892 desc->hwdesc.CNTR = sg_dma_len(sg); 892 desc->hwdesc.CNTR = sg_dma_len(sg);
893 } else { 893 } else {
894 if (direction == DMA_TO_DEVICE) { 894 if (direction == DMA_MEM_TO_DEV) {
895 desc->hwdesc32.SAR = mem; 895 desc->hwdesc32.SAR = mem;
896 desc->hwdesc32.DAR = ds->tx_reg; 896 desc->hwdesc32.DAR = ds->tx_reg;
897 } else { 897 } else {
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
900 } 900 }
901 desc->hwdesc32.CNTR = sg_dma_len(sg); 901 desc->hwdesc32.CNTR = sg_dma_len(sg);
902 } 902 }
903 if (direction == DMA_TO_DEVICE) { 903 if (direction == DMA_MEM_TO_DEV) {
904 sai = ds->reg_width; 904 sai = ds->reg_width;
905 dai = 0; 905 dai = 0;
906 } else { 906 } else {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index cbe7a2fb779..3101dd59e37 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -682,19 +682,19 @@ config I2C_XILINX
682 will be called xilinx_i2c. 682 will be called xilinx_i2c.
683 683
684config I2C_EG20T 684config I2C_EG20T
685 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)" 685 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
686 depends on PCI 686 depends on PCI
687 help 687 help
688 This driver is for PCH(Platform controller Hub) I2C of EG20T which 688 This driver is for PCH(Platform controller Hub) I2C of EG20T which
689 is an IOH(Input/Output Hub) for x86 embedded processor. 689 is an IOH(Input/Output Hub) for x86 embedded processor.
690 This driver can access PCH I2C bus device. 690 This driver can access PCH I2C bus device.
691 691
692 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 692 This driver also can be used for LAPIS Semiconductor IOH(Input/
693 Output Hub), ML7213 and ML7223. 693 Output Hub), ML7213, ML7223 and ML7831.
694 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 694 ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
695 for MP(Media Phone) use. 695 for MP(Media Phone) use and ML7831 IOH is for general purpose use.
696 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 696 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
697 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 697 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
698 698
699comment "External I2C/SMBus adapter drivers" 699comment "External I2C/SMBus adapter drivers"
700 700
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 3ef3557b6e3..ca887764104 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -136,7 +136,8 @@
136/* 136/*
137Set the number of I2C instance max 137Set the number of I2C instance max
138Intel EG20T PCH : 1ch 138Intel EG20T PCH : 1ch
139OKI SEMICONDUCTOR ML7213 IOH : 2ch 139LAPIS Semiconductor ML7213 IOH : 2ch
140LAPIS Semiconductor ML7831 IOH : 1ch
140*/ 141*/
141#define PCH_I2C_MAX_DEV 2 142#define PCH_I2C_MAX_DEV 2
142 143
@@ -180,15 +181,17 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
180static wait_queue_head_t pch_event; 181static wait_queue_head_t pch_event;
181static DEFINE_MUTEX(pch_mutex); 182static DEFINE_MUTEX(pch_mutex);
182 183
183/* Definition for ML7213 by OKI SEMICONDUCTOR */ 184/* Definition for ML7213 by LAPIS Semiconductor */
184#define PCI_VENDOR_ID_ROHM 0x10DB 185#define PCI_VENDOR_ID_ROHM 0x10DB
185#define PCI_DEVICE_ID_ML7213_I2C 0x802D 186#define PCI_DEVICE_ID_ML7213_I2C 0x802D
186#define PCI_DEVICE_ID_ML7223_I2C 0x8010 187#define PCI_DEVICE_ID_ML7223_I2C 0x8010
188#define PCI_DEVICE_ID_ML7831_I2C 0x8817
187 189
188static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = { 190static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
189 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, }, 191 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
190 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, }, 192 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
191 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, }, 193 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
194 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
192 {0,} 195 {0,}
193}; 196};
194 197
@@ -243,7 +246,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
243 if (pch_clk > PCH_MAX_CLK) 246 if (pch_clk > PCH_MAX_CLK)
244 pch_clk = 62500; 247 pch_clk = 62500;
245 248
246 pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8; 249 pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
247 /* Set transfer speed in I2CBC */ 250 /* Set transfer speed in I2CBC */
248 iowrite32(pch_i2cbc, p + PCH_I2CBC); 251 iowrite32(pch_i2cbc, p + PCH_I2CBC);
249 252
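
The change above is an operator precedence fix, not a retuning. A worked example with illustrative values (pch_clk = 50000 as defaulted earlier in this file; pch_i2c_speed assumed to be 100 kHz):

/*
 * C evaluates / and * left to right, so the two expressions differ:
 *
 *	old: (50000 + 400) / 100 * 8   == 504 * 8     == 4032
 *	new: (50000 + 400) / (100 * 8) == 50400 / 800 == 63
 *
 * The unparenthesized form divided by the speed and then scaled up by 8,
 * yielding an I2CBC value 64 times too large.
 */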
@@ -918,7 +921,9 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
918 pch_adap->dev.parent = &pdev->dev; 921 pch_adap->dev.parent = &pdev->dev;
919 922
920 pch_i2c_init(&adap_info->pch_data[i]); 923 pch_i2c_init(&adap_info->pch_data[i]);
921 ret = i2c_add_adapter(pch_adap); 924
925 pch_adap->nr = i;
926 ret = i2c_add_numbered_adapter(pch_adap);
922 if (ret) { 927 if (ret) {
923 pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i); 928 pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
924 goto err_add_adapter; 929 goto err_add_adapter;
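
The switch to i2c_add_numbered_adapter() above pins each channel to a stable bus number instead of taking whatever dynamic number is free. A minimal sketch of the pattern (adap is an illustrative, already-initialized struct i2c_adapter):

	adap->nr = 0;				/* bus number this adapter claims */
	ret = i2c_add_numbered_adapter(adap);	/* fails if that number is taken */
	if (ret)
		dev_err(adap->dev.parent, "could not claim i2c bus %d\n", adap->nr);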
@@ -1058,8 +1063,8 @@ static void __exit pch_pci_exit(void)
1058} 1063}
1059module_exit(pch_pci_exit); 1064module_exit(pch_pci_exit);
1060 1065
1061MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver"); 1066MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
1062MODULE_LICENSE("GPL"); 1067MODULE_LICENSE("GPL");
1063MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>"); 1068MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>");
1064module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); 1069module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
1065module_param(pch_clk, int, (S_IRUSR | S_IWUSR)); 1070module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index fa23faa20f0..f713eac5504 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -37,6 +37,9 @@
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/clk.h> 38#include <linux/clk.h>
39#include <linux/io.h> 39#include <linux/io.h>
40#include <linux/of.h>
41#include <linux/of_i2c.h>
42#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/i2c-omap.h>
 #include <linux/pm_runtime.h>
@@ -182,7 +185,9 @@ struct omap_i2c_dev {
 	u32 latency;		/* maximum mpu wkup latency */
 	void (*set_mpu_wkup_lat)(struct device *dev,
 				 long latency);
-	u32 speed;		/* Speed of bus in Khz */
+	u32 speed;		/* Speed of bus in kHz */
+	u32 dtrev;		/* extra revision from DT */
+	u32 flags;
 	u16 cmd_err;
 	u8 *buf;
 	u8 *regs;
@@ -235,7 +240,7 @@ static const u8 reg_map_ip_v2[] = {
 	[OMAP_I2C_BUF_REG] = 0x94,
 	[OMAP_I2C_CNT_REG] = 0x98,
 	[OMAP_I2C_DATA_REG] = 0x9c,
-	[OMAP_I2C_SYSC_REG] = 0x20,
+	[OMAP_I2C_SYSC_REG] = 0x10,
 	[OMAP_I2C_CON_REG] = 0xa4,
 	[OMAP_I2C_OA_REG] = 0xa8,
 	[OMAP_I2C_SA_REG] = 0xac,
@@ -266,11 +271,7 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
 
 static void omap_i2c_unidle(struct omap_i2c_dev *dev)
 {
-	struct omap_i2c_bus_platform_data *pdata;
-
-	pdata = dev->dev->platform_data;
-
-	if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+	if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
 		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
 		omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
 		omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -291,13 +292,10 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
 
 static void omap_i2c_idle(struct omap_i2c_dev *dev)
 {
-	struct omap_i2c_bus_platform_data *pdata;
 	u16 iv;
 
-	pdata = dev->dev->platform_data;
-
 	dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
-	if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+	if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
 		omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
 	else
 		omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
@@ -320,9 +318,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
 	unsigned long timeout;
 	unsigned long internal_clk = 0;
 	struct clk *fclk;
-	struct omap_i2c_bus_platform_data *pdata;
-
-	pdata = dev->dev->platform_data;
 
 	if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
 		/* Disable I2C controller before soft reset */
@@ -373,7 +368,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
 	}
 	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
 
-	if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+	if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
 		/*
 		 * The I2C functional clock is the armxor_ck, so there's
 		 * no need to get "armxor_ck" separately. Now, if OMAP2420
@@ -397,7 +392,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
 		psc = fclk_rate / 12000000;
 	}
 
-	if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+	if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
 
 		/*
 		 * HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -406,7 +401,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
 		 * The filter is iclk (fclk for HS) period.
 		 */
 		if (dev->speed > 400 ||
-		    pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+		    dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
 			internal_clk = 19200;
 		else if (dev->speed > 100)
 			internal_clk = 9600;
@@ -475,7 +470,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
 
 	dev->errata = 0;
 
-	if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+	if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
 		dev->errata |= I2C_OMAP_ERRATA_I207;
 
 	/* Enable interrupts */
@@ -484,7 +479,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
 			OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
 			(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
 	omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
-	if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+	if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
 		dev->pscstate = psc;
 		dev->scllstate = scll;
 		dev->sclhstate = sclh;
@@ -804,9 +799,6 @@ omap_i2c_isr(int this_irq, void *dev_id)
 	u16 bits;
 	u16 stat, w;
 	int err, count = 0;
-	struct omap_i2c_bus_platform_data *pdata;
-
-	pdata = dev->dev->platform_data;
 
 	if (pm_runtime_suspended(dev->dev))
 		return IRQ_NONE;
@@ -830,11 +822,9 @@ complete:
 			~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
 			OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
 
-		if (stat & OMAP_I2C_STAT_NACK) {
+		if (stat & OMAP_I2C_STAT_NACK)
 			err |= OMAP_I2C_STAT_NACK;
-			omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
-					   OMAP_I2C_CON_STP);
-		}
+
 		if (stat & OMAP_I2C_STAT_AL) {
 			dev_err(dev->dev, "Arbitration lost\n");
 			err |= OMAP_I2C_STAT_AL;
@@ -875,7 +865,7 @@ complete:
 					 * Data reg in 2430, omap3 and
 					 * omap4 is 8 bit wide
 					 */
-					if (pdata->flags &
+					if (dev->flags &
 					    OMAP_I2C_FLAG_16BIT_DATA_REG) {
 						if (dev->buf_len) {
 							*dev->buf++ = w >> 8;
@@ -918,7 +908,7 @@ complete:
 					 * Data reg in 2430, omap3 and
 					 * omap4 is 8 bit wide
 					 */
-					if (pdata->flags &
+					if (dev->flags &
 					    OMAP_I2C_FLAG_16BIT_DATA_REG) {
 						if (dev->buf_len) {
 							w |= *dev->buf++ << 8;
@@ -965,6 +955,32 @@ static const struct i2c_algorithm omap_i2c_algo = {
 	.functionality = omap_i2c_func,
 };
 
+#ifdef CONFIG_OF
+static struct omap_i2c_bus_platform_data omap3_pdata = {
+	.rev = OMAP_I2C_IP_VERSION_1,
+	.flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
+		 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
+		 OMAP_I2C_FLAG_BUS_SHIFT_2,
+};
+
+static struct omap_i2c_bus_platform_data omap4_pdata = {
+	.rev = OMAP_I2C_IP_VERSION_2,
+};
+
+static const struct of_device_id omap_i2c_of_match[] = {
+	{
+		.compatible = "ti,omap4-i2c",
+		.data = &omap4_pdata,
+	},
+	{
+		.compatible = "ti,omap3-i2c",
+		.data = &omap3_pdata,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
+#endif
+
 static int __devinit
 omap_i2c_probe(struct platform_device *pdev)
 {
@@ -972,9 +988,10 @@ omap_i2c_probe(struct platform_device *pdev)
 	struct i2c_adapter *adap;
 	struct resource *mem, *irq, *ioarea;
 	struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
+	struct device_node *node = pdev->dev.of_node;
+	const struct of_device_id *match;
 	irq_handler_t isr;
 	int r;
-	u32 speed = 0;
 
 	/* NOTE: driver uses the static register mapping */
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1001,15 +1018,24 @@ omap_i2c_probe(struct platform_device *pdev)
 		goto err_release_region;
 	}
 
-	if (pdata != NULL) {
-		speed = pdata->clkrate;
+	match = of_match_device(omap_i2c_of_match, &pdev->dev);
+	if (match) {
+		u32 freq = 100000; /* default to 100000 Hz */
+
+		pdata = match->data;
+		dev->dtrev = pdata->rev;
+		dev->flags = pdata->flags;
+
+		of_property_read_u32(node, "clock-frequency", &freq);
+		/* convert DT freq value in Hz into kHz for speed */
+		dev->speed = freq / 1000;
+	} else if (pdata != NULL) {
+		dev->speed = pdata->clkrate;
+		dev->flags = pdata->flags;
 		dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
-	} else {
-		speed = 100;	/* Default speed */
-		dev->set_mpu_wkup_lat = NULL;
+		dev->dtrev = pdata->rev;
 	}
 
-	dev->speed = speed;
 	dev->dev = &pdev->dev;
 	dev->irq = irq->start;
 	dev->base = ioremap(mem->start, resource_size(mem));
@@ -1020,9 +1046,9 @@ omap_i2c_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, dev);
 
-	dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+	dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
 
-	if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+	if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
 		dev->regs = (u8 *)reg_map_ip_v2;
 	else
 		dev->regs = (u8 *)reg_map_ip_v1;
@@ -1035,7 +1061,7 @@ omap_i2c_probe(struct platform_device *pdev)
 	if (dev->rev <= OMAP_I2C_REV_ON_3430)
 		dev->errata |= I2C_OMAP3_1P153;
 
-	if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+	if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
 		u16 s;
 
 		/* Set up the fifo size - Get total size */
@@ -1058,7 +1084,7 @@ omap_i2c_probe(struct platform_device *pdev)
 		/* calculate wakeup latency constraint for MPU */
 		if (dev->set_mpu_wkup_lat != NULL)
 			dev->latency = (1000000 * dev->fifo_size) /
-				       (1000 * speed / 8);
+				       (1000 * dev->speed / 8);
 	}
 
 	/* reset ASAP, clearing any IRQs */
@@ -1074,7 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
 	}
 
 	dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
-		 pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+		 dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
 
 	pm_runtime_put(dev->dev);
 
@@ -1085,6 +1111,7 @@ omap_i2c_probe(struct platform_device *pdev)
 	strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
 	adap->algo = &omap_i2c_algo;
 	adap->dev.parent = &pdev->dev;
+	adap->dev.of_node = pdev->dev.of_node;
 
 	/* i2c device drivers may be active on return from add_adapter() */
 	adap->nr = pdev->id;
@@ -1094,6 +1121,8 @@ omap_i2c_probe(struct platform_device *pdev)
 		goto err_free_irq;
 	}
 
+	of_i2c_register_devices(adap);
+
 	return 0;
 
 err_free_irq:
@@ -1166,6 +1195,7 @@ static struct platform_driver omap_i2c_driver = {
 		.name = "omap_i2c",
 		.owner = THIS_MODULE,
 		.pm = OMAP_I2C_PM_OPS,
+		.of_match_table = of_match_ptr(omap_i2c_of_match),
 	},
 };
 
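/*
 * Illustrative sketch, not part of the patch: a hypothetical device-tree
 * node that the of_device_id table added above would match. The
 * "clock-frequency" property is given in Hz and probe() divides it by
 * 1000, so this node yields dev->speed == 400 (kHz):
 *
 *	i2c1: i2c@48070000 {
 *		compatible = "ti,omap3-i2c";
 *		clock-frequency = <400000>;
 *	};
 */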
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5d2f8e13cf0..20bce51c2e8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 		.enter = &intel_idle },
 };
 
-static int get_driver_data(int cstate)
+static long get_driver_data(int cstate)
 {
 	int driver_data;
 	switch (cstate) {
@@ -232,6 +232,7 @@ static int get_driver_data(int cstate)
  * @drv: cpuidle driver
  * @index: index of cpuidle state
  *
+ * Must be called under local_irq_disable().
  */
 static int intel_idle(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
@@ -247,8 +248,6 @@ static int intel_idle(struct cpuidle_device *dev,
 
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
-	local_irq_disable();
-
 	/*
 	 * leave_mm() to avoid costly and often unnecessary wakeups
 	 * for flushing the user TLB's associated with the active mm.
@@ -348,7 +347,8 @@ static int intel_idle_probe(void)
 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
 
 	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+	    !mwait_substates)
 		return -ENODEV;
 
 	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@ static int intel_idle_probe(void)
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
 		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
 	else {
-		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
 		register_cpu_notifier(&setup_broadcast_notifier);
 	}
 
@@ -471,71 +471,67 @@ static int intel_idle_cpuidle_driver_init(void)
 	}
 
 	if (auto_demotion_disable_flags)
-		smp_call_function(auto_demotion_disable, NULL, 1);
+		on_each_cpu(auto_demotion_disable, NULL, 1);
 
 	return 0;
 }
 
 
 /*
- * intel_idle_cpuidle_devices_init()
+ * intel_idle_cpu_init()
  * allocate, initialize, register cpuidle_devices
+ * @cpu: cpu/core to initialize
  */
-static int intel_idle_cpuidle_devices_init(void)
+int intel_idle_cpu_init(int cpu)
 {
-	int i, cstate;
+	int cstate;
 	struct cpuidle_device *dev;
 
-	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-	if (intel_idle_cpuidle_devices == NULL)
-		return -ENOMEM;
-
-	for_each_online_cpu(i) {
-		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
 
 	dev->state_count = 1;
 
 	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
 		int num_substates;
 
 		if (cstate > max_cstate) {
			printk(PREFIX "max_cstate %d reached\n",
 			       max_cstate);
 			break;
 		}
 
 		/* does the state exist in CPUID.MWAIT? */
 		num_substates = (mwait_substates >> ((cstate) * 4))
 				& MWAIT_SUBSTATE_MASK;
 		if (num_substates == 0)
 			continue;
 		/* is the state not enabled? */
-		if (cpuidle_state_table[cstate].enter == NULL) {
+		if (cpuidle_state_table[cstate].enter == NULL)
 			continue;
-		}
 
 		dev->states_usage[dev->state_count].driver_data =
 			(void *)get_driver_data(cstate);
 
 		dev->state_count += 1;
 	}
+	dev->cpu = cpu;
 
-	dev->cpu = i;
-	if (cpuidle_register_device(dev)) {
-		pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
-			 i);
-		intel_idle_cpuidle_devices_uninit();
-		return -EIO;
-	}
+	if (cpuidle_register_device(dev)) {
+		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+		intel_idle_cpuidle_devices_uninit();
+		return -EIO;
 	}
 
+	if (auto_demotion_disable_flags)
+		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+
 	return 0;
 }
 
 
 static int __init intel_idle_init(void)
 {
-	int retval;
+	int retval, i;
 
 	/* Do not load intel_idle at all for now if idle= is passed */
 	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -553,10 +549,16 @@ static int __init intel_idle_init(void)
 		return retval;
 	}
 
-	retval = intel_idle_cpuidle_devices_init();
-	if (retval) {
-		cpuidle_unregister_driver(&intel_idle_driver);
-		return retval;
+	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+	if (intel_idle_cpuidle_devices == NULL)
+		return -ENOMEM;
+
+	for_each_online_cpu(i) {
+		retval = intel_idle_cpu_init(i);
+		if (retval) {
+			cpuidle_unregister_driver(&intel_idle_driver);
+			return retval;
+		}
 	}
 
 	return 0;
@@ -568,7 +570,7 @@ static void __exit intel_idle_exit(void)
 	cpuidle_unregister_driver(&intel_idle_driver);
 
 	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
-		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
 		unregister_cpu_notifier(&setup_broadcast_notifier);
 	}
 
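/*
 * Likely rationale for the get_driver_data() return-type change above
 * (an inference, not stated in the patch): the value is stored via a
 * cast to void * in states_usage[].driver_data, and casting an int to
 * a pointer triggers an int-to-pointer-cast warning on 64-bit builds,
 * whereas long is pointer-sized.
 */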
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 0f9a84c1046..eb0add311dc 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -55,6 +55,7 @@ source "drivers/infiniband/hw/nes/Kconfig"
 source "drivers/infiniband/ulp/ipoib/Kconfig"
 
 source "drivers/infiniband/ulp/srp/Kconfig"
+source "drivers/infiniband/ulp/srpt/Kconfig"
 
 source "drivers/infiniband/ulp/iser/Kconfig"
 
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 9cc7a47d3e6..a3b2d8eac86 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES)		+= hw/nes/
 obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)		+= ulp/srp/
+obj-$(CONFIG_INFINIBAND_SRPT)		+= ulp/srpt/
 obj-$(CONFIG_INFINIBAND_ISER)		+= ulp/iser/
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644
index 00000000000..31ee83d528d
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -0,0 +1,12 @@
1config INFINIBAND_SRPT
2 tristate "InfiniBand SCSI RDMA Protocol target support"
3 depends on INFINIBAND && TARGET_CORE
4 ---help---
5
6	  Support for the SCSI RDMA Protocol (SRP) Target driver. SRP
7	  allows an initiator to access a block storage device on another
8	  host (target) over a network that supports the RDMA protocol.
9	  Currently the RDMA protocol is supported by InfiniBand and by
10	  iWARP network hardware. More information about the SRP protocol
11	  can be found on the website of the INCITS T10 technical
12	  committee (http://www.t10.org/).
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644
index 00000000000..e3ee4bdfffa
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Makefile
@@ -0,0 +1,2 @@
1ccflags-y := -Idrivers/target
2obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644
index 00000000000..fb1de1f6f29
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef IB_DM_MAD_H
35#define IB_DM_MAD_H
36
37#include <linux/types.h>
38
39#include <rdma/ib_mad.h>
40
41enum {
42 /*
43 * See also section 13.4.7 Status Field, table 115 MAD Common Status
44 * Field Bit Values and also section 16.3.1.1 Status Field in the
45 * InfiniBand Architecture Specification.
46 */
47 DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
48 DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
49 DM_MAD_STATUS_INVALID_FIELD = 0x001c,
50 DM_MAD_STATUS_NO_IOC = 0x0100,
51
52 /*
53 * See also the Device Management chapter, section 16.3.3 Attributes,
54 * table 279 Device Management Attributes in the InfiniBand
55 * Architecture Specification.
56 */
57 DM_ATTR_CLASS_PORT_INFO = 0x01,
58 DM_ATTR_IOU_INFO = 0x10,
59 DM_ATTR_IOC_PROFILE = 0x11,
60 DM_ATTR_SVC_ENTRIES = 0x12
61};
62
63struct ib_dm_hdr {
64 u8 reserved[28];
65};
66
67/*
68 * Structure of management datagram sent by the SRP target implementation.
69 * Contains a management datagram header, reliable multi-packet transaction
70 * protocol (RMPP) header and ib_dm_hdr. Notes:
71 * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
72 * management datagrams.
73 * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
74 * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
75 * - The maximum supported size for a management datagram when not using RMPP
76 * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
77 */
78struct ib_dm_mad {
79 struct ib_mad_hdr mad_hdr;
80 struct ib_rmpp_hdr rmpp_hdr;
81 struct ib_dm_hdr dm_hdr;
82 u8 data[IB_MGMT_DEVICE_DATA];
83};
84
85/*
86 * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
87 * Architecture Specification.
88 */
89struct ib_dm_iou_info {
90 __be16 change_id;
91 u8 max_controllers;
92 u8 op_rom;
93 u8 controller_list[128];
94};
95
96/*
97 * IOControllerProfile as defined in section 16.3.3.4 IOControllerProfile of
98 * the InfiniBand Architecture Specification.
99 */
100struct ib_dm_ioc_profile {
101 __be64 guid;
102 __be32 vendor_id;
103 __be32 device_id;
104 __be16 device_version;
105 __be16 reserved1;
106 __be32 subsys_vendor_id;
107 __be32 subsys_device_id;
108 __be16 io_class;
109 __be16 io_subclass;
110 __be16 protocol;
111 __be16 protocol_version;
112 __be16 service_conn;
113 __be16 initiators_supported;
114 __be16 send_queue_depth;
115 u8 reserved2;
116 u8 rdma_read_depth;
117 __be32 send_size;
118 __be32 rdma_size;
119 u8 op_cap_mask;
120 u8 svc_cap_mask;
121 u8 num_svc_entries;
122 u8 reserved3[9];
123 u8 id_string[64];
124};
125
126struct ib_dm_svc_entry {
127 u8 name[40];
128 __be64 id;
129};
130
131/*
132 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
133 * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
134 */
135struct ib_dm_svc_entries {
136 struct ib_dm_svc_entry service_entries[4];
137};
138
139#endif
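
/*
 * A minimal compile-time sketch of the layout constraint documented for
 * struct ib_dm_mad above, assuming the usual kernel BUILD_BUG_ON() and
 * offsetof() helpers: the three leading headers must add up to exactly
 * IB_MGMT_DEVICE_HDR (64) bytes so that ib_create_send_mad() splits the
 * header and data correctly. ib_srpt.c performs the first of these
 * checks itself.
 */
static inline void ib_dm_mad_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
	BUILD_BUG_ON(sizeof(struct ib_dm_mad) !=
		     IB_MGMT_DEVICE_HDR + IB_MGMT_DEVICE_DATA);
}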
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644
index 00000000000..cd5d05e22a7
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -0,0 +1,4073 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/ctype.h>
40#include <linux/kthread.h>
41#include <linux/string.h>
42#include <linux/delay.h>
43#include <linux/atomic.h>
44#include <scsi/scsi_tcq.h>
45#include <target/configfs_macros.h>
46#include <target/target_core_base.h>
47#include <target/target_core_fabric_configfs.h>
48#include <target/target_core_fabric.h>
49#include <target/target_core_configfs.h>
50#include "ib_srpt.h"
51
52/* Name of this kernel module. */
53#define DRV_NAME "ib_srpt"
54#define DRV_VERSION "2.0.0"
55#define DRV_RELDATE "2011-02-14"
56
57#define SRPT_ID_STRING "Linux SRP target"
58
59#undef pr_fmt
60#define pr_fmt(fmt) DRV_NAME " " fmt
61
62MODULE_AUTHOR("Vu Pham and Bart Van Assche");
63MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
64 "v" DRV_VERSION " (" DRV_RELDATE ")");
65MODULE_LICENSE("Dual BSD/GPL");
66
67/*
68 * Global Variables
69 */
70
71static u64 srpt_service_guid;
72static spinlock_t srpt_dev_lock; /* Protects srpt_dev_list. */
73static struct list_head srpt_dev_list; /* List of srpt_device structures. */
74
75static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
76module_param(srp_max_req_size, int, 0444);
77MODULE_PARM_DESC(srp_max_req_size,
78 "Maximum size of SRP request messages in bytes.");
79
80static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
81module_param(srpt_srq_size, int, 0444);
82MODULE_PARM_DESC(srpt_srq_size,
83 "Shared receive queue (SRQ) size.");
84
85static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
86{
87 return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
88}
89module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
90 0444);
91MODULE_PARM_DESC(srpt_service_guid,
92 "Using this value for ioc_guid, id_ext, and cm_listen_id"
93 " instead of using the node_guid of the first HCA.");
94
95static struct ib_client srpt_client;
96static struct target_fabric_configfs *srpt_target;
97static void srpt_release_channel(struct srpt_rdma_ch *ch);
98static int srpt_queue_status(struct se_cmd *cmd);
99
100/**
101 * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
102 */
103static inline
104enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
105{
106 switch (dir) {
107 case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
108 case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
109 default: return dir;
110 }
111}
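
/*
 * Why the swap above is needed (editorial note): the data direction in
 * an SRP command is expressed from the initiator's point of view, while
 * this driver maps buffers on the target side. A DATA-IN command
 * (DMA_FROM_DEVICE for the initiator) is served by RDMA WRITEs out of
 * local target memory, so the local mapping must be DMA_TO_DEVICE, and
 * vice versa.
 */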
112
113/**
114 * srpt_sdev_name() - Return the name associated with the HCA.
115 *
116 * Examples are ib0, ib1, ...
117 */
118static inline const char *srpt_sdev_name(struct srpt_device *sdev)
119{
120 return sdev->device->name;
121}
122
123static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
124{
125 unsigned long flags;
126 enum rdma_ch_state state;
127
128 spin_lock_irqsave(&ch->spinlock, flags);
129 state = ch->state;
130 spin_unlock_irqrestore(&ch->spinlock, flags);
131 return state;
132}
133
134static enum rdma_ch_state
135srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
136{
137 unsigned long flags;
138 enum rdma_ch_state prev;
139
140 spin_lock_irqsave(&ch->spinlock, flags);
141 prev = ch->state;
142 ch->state = new_state;
143 spin_unlock_irqrestore(&ch->spinlock, flags);
144 return prev;
145}
146
147/**
148 * srpt_test_and_set_ch_state() - Test and set the channel state.
149 *
150 * Returns true if and only if the channel state has been set to the new state.
151 */
152static bool
153srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
154 enum rdma_ch_state new)
155{
156 unsigned long flags;
157 enum rdma_ch_state prev;
158
159 spin_lock_irqsave(&ch->spinlock, flags);
160 prev = ch->state;
161 if (prev == old)
162 ch->state = new;
163 spin_unlock_irqrestore(&ch->spinlock, flags);
164 return prev == old;
165}
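
/*
 * A minimal usage sketch (hypothetical caller, not part of the driver):
 * channel-state transitions race with CM and completion callbacks, so a
 * caller tests and sets in one atomic step instead of reading and then
 * writing the state.
 */
static void srpt_ch_state_usage_sketch(struct srpt_rdma_ch *ch)
{
	/* Exactly one of several racing callers wins this transition. */
	if (srpt_test_and_set_ch_state(ch, CH_DRAINING, CH_RELEASING))
		pr_debug("this caller performs the release\n");
}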
166
167/**
168 * srpt_event_handler() - Asynchronous IB event callback function.
169 *
170 * Callback function called by the InfiniBand core when an asynchronous IB
171 * event occurs. This callback may occur in interrupt context. See also
172 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
173 * Architecture Specification.
174 */
175static void srpt_event_handler(struct ib_event_handler *handler,
176 struct ib_event *event)
177{
178 struct srpt_device *sdev;
179 struct srpt_port *sport;
180
181 sdev = ib_get_client_data(event->device, &srpt_client);
182 if (!sdev || sdev->device != event->device)
183 return;
184
185 pr_debug("ASYNC event= %d on device= %s\n", event->event,
186 srpt_sdev_name(sdev));
187
188 switch (event->event) {
189 case IB_EVENT_PORT_ERR:
190 if (event->element.port_num <= sdev->device->phys_port_cnt) {
191 sport = &sdev->port[event->element.port_num - 1];
192 sport->lid = 0;
193 sport->sm_lid = 0;
194 }
195 break;
196 case IB_EVENT_PORT_ACTIVE:
197 case IB_EVENT_LID_CHANGE:
198 case IB_EVENT_PKEY_CHANGE:
199 case IB_EVENT_SM_CHANGE:
200 case IB_EVENT_CLIENT_REREGISTER:
201 /* Refresh port data asynchronously. */
202 if (event->element.port_num <= sdev->device->phys_port_cnt) {
203 sport = &sdev->port[event->element.port_num - 1];
204 if (!sport->lid && !sport->sm_lid)
205 schedule_work(&sport->work);
206 }
207 break;
208 default:
209 printk(KERN_ERR "received unrecognized IB event %d\n",
210 event->event);
211 break;
212 }
213}
214
215/**
216 * srpt_srq_event() - SRQ event callback function.
217 */
218static void srpt_srq_event(struct ib_event *event, void *ctx)
219{
220 printk(KERN_INFO "SRQ event %d\n", event->event);
221}
222
223/**
224 * srpt_qp_event() - QP event callback function.
225 */
226static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
227{
228 pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
229 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
230
231 switch (event->event) {
232 case IB_EVENT_COMM_EST:
233 ib_cm_notify(ch->cm_id, event->event);
234 break;
235 case IB_EVENT_QP_LAST_WQE_REACHED:
236 if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
237 CH_RELEASING))
238 srpt_release_channel(ch);
239 else
240 pr_debug("%s: state %d - ignored LAST_WQE.\n",
241 ch->sess_name, srpt_get_ch_state(ch));
242 break;
243 default:
244 printk(KERN_ERR "received unrecognized IB QP event %d\n",
245 event->event);
246 break;
247 }
248}
249
250/**
251 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
252 *
253 * @slot: one-based slot number.
254 * @value: four-bit value.
255 *
256 * Copies the lowest four bits of value into element slot of the array of
257 * four-bit elements called c_list (controller list). The index slot is one-based.
258 */
259static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
260{
261 u16 id;
262 u8 tmp;
263
264 id = (slot - 1) / 2;
265 if (slot & 0x1) {
266 tmp = c_list[id] & 0xf;
267 c_list[id] = (value << 4) | tmp;
268 } else {
269 tmp = c_list[id] & 0xf0;
270 c_list[id] = (value & 0xf) | tmp;
271 }
272}
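
/*
 * A short sketch (hypothetical values, not part of the driver) of the
 * nibble packing done by srpt_set_ioc(): two one-based slots share each
 * byte, odd slots in the high nibble and even slots in the low nibble.
 */
static void srpt_set_ioc_sketch(void)
{
	u8 c_list[128] = { 0 };

	srpt_set_ioc(c_list, 1, 1);	/* c_list[0] == 0x10 */
	srpt_set_ioc(c_list, 2, 0xf);	/* c_list[0] == 0x1f */
}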
273
274/**
275 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
276 *
277 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
278 * Specification.
279 */
280static void srpt_get_class_port_info(struct ib_dm_mad *mad)
281{
282 struct ib_class_port_info *cif;
283
284 cif = (struct ib_class_port_info *)mad->data;
285 memset(cif, 0, sizeof *cif);
286 cif->base_version = 1;
287 cif->class_version = 1;
288 cif->resp_time_value = 20;
289
290 mad->mad_hdr.status = 0;
291}
292
293/**
294 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
295 *
296 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
297 * Specification. See also section B.7, table B.6 in the SRP r16a document.
298 */
299static void srpt_get_iou(struct ib_dm_mad *mad)
300{
301 struct ib_dm_iou_info *ioui;
302 u8 slot;
303 int i;
304
305 ioui = (struct ib_dm_iou_info *)mad->data;
306 ioui->change_id = __constant_cpu_to_be16(1);
307 ioui->max_controllers = 16;
308
309 /* set present for slot 1 and empty for the rest */
310 srpt_set_ioc(ioui->controller_list, 1, 1);
311 for (i = 1, slot = 2; i < 16; i++, slot++)
312 srpt_set_ioc(ioui->controller_list, slot, 0);
313
314 mad->mad_hdr.status = 0;
315}
316
317/**
318 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
319 *
320 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
321 * Architecture Specification. See also section B.7, table B.7 in the SRP
322 * r16a document.
323 */
324static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
325 struct ib_dm_mad *mad)
326{
327 struct srpt_device *sdev = sport->sdev;
328 struct ib_dm_ioc_profile *iocp;
329
330 iocp = (struct ib_dm_ioc_profile *)mad->data;
331
332 if (!slot || slot > 16) {
333 mad->mad_hdr.status
334 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
335 return;
336 }
337
338 if (slot > 2) {
339 mad->mad_hdr.status
340 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
341 return;
342 }
343
344 memset(iocp, 0, sizeof *iocp);
345 strcpy(iocp->id_string, SRPT_ID_STRING);
346 iocp->guid = cpu_to_be64(srpt_service_guid);
347 iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
348 iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
349 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
350 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
351 iocp->subsys_device_id = 0x0;
352 iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
353 iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
354 iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
355 iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
356 iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
357 iocp->rdma_read_depth = 4;
358 iocp->send_size = cpu_to_be32(srp_max_req_size);
359 iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
360 1U << 24));
361 iocp->num_svc_entries = 1;
362 iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
363 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
364
365 mad->mad_hdr.status = 0;
366}
367
368/**
369 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
370 *
371 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
372 * Specification. See also section B.7, table B.8 in the SRP r16a document.
373 */
374static void srpt_get_svc_entries(u64 ioc_guid,
375 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
376{
377 struct ib_dm_svc_entries *svc_entries;
378
379 WARN_ON(!ioc_guid);
380
381 if (!slot || slot > 16) {
382 mad->mad_hdr.status
383 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
384 return;
385 }
386
387 if (slot > 2 || lo > hi || hi > 1) {
388 mad->mad_hdr.status
389 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
390 return;
391 }
392
393 svc_entries = (struct ib_dm_svc_entries *)mad->data;
394 memset(svc_entries, 0, sizeof *svc_entries);
395 svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
396 snprintf(svc_entries->service_entries[0].name,
397 sizeof(svc_entries->service_entries[0].name),
398 "%s%016llx",
399 SRP_SERVICE_NAME_PREFIX,
400 ioc_guid);
401
402 mad->mad_hdr.status = 0;
403}
404
405/**
406 * srpt_mgmt_method_get() - Process a received management datagram.
407 * @sp: source port through which the MAD has been received.
408 * @rq_mad: received MAD.
409 * @rsp_mad: response MAD.
410 */
411static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
412 struct ib_dm_mad *rsp_mad)
413{
414 u16 attr_id;
415 u32 slot;
416 u8 hi, lo;
417
418 attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
419 switch (attr_id) {
420 case DM_ATTR_CLASS_PORT_INFO:
421 srpt_get_class_port_info(rsp_mad);
422 break;
423 case DM_ATTR_IOU_INFO:
424 srpt_get_iou(rsp_mad);
425 break;
426 case DM_ATTR_IOC_PROFILE:
427 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
428 srpt_get_ioc(sp, slot, rsp_mad);
429 break;
430 case DM_ATTR_SVC_ENTRIES:
431 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
432 hi = (u8) ((slot >> 8) & 0xff);
433 lo = (u8) (slot & 0xff);
434 slot = (u16) ((slot >> 16) & 0xffff);
435 srpt_get_svc_entries(srpt_service_guid,
436 slot, hi, lo, rsp_mad);
437 break;
438 default:
439 rsp_mad->mad_hdr.status =
440 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
441 break;
442 }
443}
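
/*
 * Worked example (hypothetical value) of the DM_ATTR_SVC_ENTRIES
 * decoding above: attr_mod 0x00020100 unpacks to slot = 2, hi = 1 and
 * lo = 0, i.e. a request for service entries 0..1 of controller slot 2.
 */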
444
445/**
446 * srpt_mad_send_handler() - Post MAD-send callback function.
447 */
448static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
449 struct ib_mad_send_wc *mad_wc)
450{
451 ib_destroy_ah(mad_wc->send_buf->ah);
452 ib_free_send_mad(mad_wc->send_buf);
453}
454
455/**
456 * srpt_mad_recv_handler() - MAD reception callback function.
457 */
458static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
459 struct ib_mad_recv_wc *mad_wc)
460{
461 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
462 struct ib_ah *ah;
463 struct ib_mad_send_buf *rsp;
464 struct ib_dm_mad *dm_mad;
465
466 if (!mad_wc || !mad_wc->recv_buf.mad)
467 return;
468
469 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
470 mad_wc->recv_buf.grh, mad_agent->port_num);
471 if (IS_ERR(ah))
472 goto err;
473
474 BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
475
476 rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
477 mad_wc->wc->pkey_index, 0,
478 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
479 GFP_KERNEL);
480 if (IS_ERR(rsp))
481 goto err_rsp;
482
483 rsp->ah = ah;
484
485 dm_mad = rsp->mad;
486 memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
487 dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
488 dm_mad->mad_hdr.status = 0;
489
490 switch (mad_wc->recv_buf.mad->mad_hdr.method) {
491 case IB_MGMT_METHOD_GET:
492 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
493 break;
494 case IB_MGMT_METHOD_SET:
495 dm_mad->mad_hdr.status =
496 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
497 break;
498 default:
499 dm_mad->mad_hdr.status =
500 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
501 break;
502 }
503
504 if (!ib_post_send_mad(rsp, NULL)) {
505 ib_free_recv_mad(mad_wc);
506 /* will destroy_ah & free_send_mad in send completion */
507 return;
508 }
509
510 ib_free_send_mad(rsp);
511
512err_rsp:
513 ib_destroy_ah(ah);
514err:
515 ib_free_recv_mad(mad_wc);
516}
517
518/**
519 * srpt_refresh_port() - Configure a HCA port.
520 *
521 * Enable InfiniBand management datagram processing, update the cached sm_lid,
522 * lid and gid values, and register a callback function for processing MADs
523 * on the specified port.
524 *
525 * Note: It is safe to call this function more than once for the same port.
526 */
527static int srpt_refresh_port(struct srpt_port *sport)
528{
529 struct ib_mad_reg_req reg_req;
530 struct ib_port_modify port_modify;
531 struct ib_port_attr port_attr;
532 int ret;
533
534 memset(&port_modify, 0, sizeof port_modify);
535 port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
536 port_modify.clr_port_cap_mask = 0;
537
538 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
539 if (ret)
540 goto err_mod_port;
541
542 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
543 if (ret)
544 goto err_query_port;
545
546 sport->sm_lid = port_attr.sm_lid;
547 sport->lid = port_attr.lid;
548
549 ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
550 if (ret)
551 goto err_query_port;
552
553 if (!sport->mad_agent) {
554 memset(&reg_req, 0, sizeof reg_req);
555 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
556 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
557 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
558 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
559
560 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
561 sport->port,
562 IB_QPT_GSI,
563 &reg_req, 0,
564 srpt_mad_send_handler,
565 srpt_mad_recv_handler,
566 sport);
567 if (IS_ERR(sport->mad_agent)) {
568 ret = PTR_ERR(sport->mad_agent);
569 sport->mad_agent = NULL;
570 goto err_query_port;
571 }
572 }
573
574 return 0;
575
576err_query_port:
577
578 port_modify.set_port_cap_mask = 0;
579 port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
580 ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
581
582err_mod_port:
583
584 return ret;
585}
586
587/**
588 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
589 *
590 * Note: It is safe to call this function more than once for the same device.
591 */
592static void srpt_unregister_mad_agent(struct srpt_device *sdev)
593{
594 struct ib_port_modify port_modify = {
595 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
596 };
597 struct srpt_port *sport;
598 int i;
599
600 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
601 sport = &sdev->port[i - 1];
602 WARN_ON(sport->port != i);
603 if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
604 printk(KERN_ERR "disabling MAD processing failed.\n");
605 if (sport->mad_agent) {
606 ib_unregister_mad_agent(sport->mad_agent);
607 sport->mad_agent = NULL;
608 }
609 }
610}
611
612/**
613 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
614 */
615static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
616 int ioctx_size, int dma_size,
617 enum dma_data_direction dir)
618{
619 struct srpt_ioctx *ioctx;
620
621 ioctx = kmalloc(ioctx_size, GFP_KERNEL);
622 if (!ioctx)
623 goto err;
624
625 ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
626 if (!ioctx->buf)
627 goto err_free_ioctx;
628
629 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
630 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
631 goto err_free_buf;
632
633 return ioctx;
634
635err_free_buf:
636 kfree(ioctx->buf);
637err_free_ioctx:
638 kfree(ioctx);
639err:
640 return NULL;
641}
642
643/**
644 * srpt_free_ioctx() - Free an SRPT I/O context structure.
645 */
646static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
647 int dma_size, enum dma_data_direction dir)
648{
649 if (!ioctx)
650 return;
651
652 ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
653 kfree(ioctx->buf);
654 kfree(ioctx);
655}
656
657/**
658 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
659 * @sdev: Device to allocate the I/O context ring for.
660 * @ring_size: Number of elements in the I/O context ring.
661 * @ioctx_size: I/O context size.
662 * @dma_size: DMA buffer size.
663 * @dir: DMA data direction.
664 */
665static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
666 int ring_size, int ioctx_size,
667 int dma_size, enum dma_data_direction dir)
668{
669 struct srpt_ioctx **ring;
670 int i;
671
672 WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
673 && ioctx_size != sizeof(struct srpt_send_ioctx));
674
675 ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
676 if (!ring)
677 goto out;
678 for (i = 0; i < ring_size; ++i) {
679 ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
680 if (!ring[i])
681 goto err;
682 ring[i]->index = i;
683 }
684 goto out;
685
686err:
687 while (--i >= 0)
688 srpt_free_ioctx(sdev, ring[i], dma_size, dir);
689 kfree(ring);
690out:
691 return ring;
692}
693
694/**
695 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
696 */
697static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
698 struct srpt_device *sdev, int ring_size,
699 int dma_size, enum dma_data_direction dir)
700{
701 int i;
702
703 for (i = 0; i < ring_size; ++i)
704 srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
705 kfree(ioctx_ring);
706}
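
/*
 * A minimal usage sketch (hypothetical ring size, not part of the
 * driver): allocate a receive ring sized like the driver's real buffers
 * and free it again. On a mid-ring allocation failure the allocator
 * itself unwinds the contexts it already created and returns NULL.
 */
static int srpt_ioctx_ring_sketch(struct srpt_device *sdev)
{
	struct srpt_ioctx **ring;

	ring = srpt_alloc_ioctx_ring(sdev, 16, sizeof(struct srpt_recv_ioctx),
				     srp_max_req_size, DMA_FROM_DEVICE);
	if (!ring)
		return -ENOMEM;
	srpt_free_ioctx_ring(ring, sdev, 16, srp_max_req_size,
			     DMA_FROM_DEVICE);
	return 0;
}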
707
708/**
709 * srpt_get_cmd_state() - Get the state of a SCSI command.
710 */
711static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
712{
713 enum srpt_command_state state;
714 unsigned long flags;
715
716 BUG_ON(!ioctx);
717
718 spin_lock_irqsave(&ioctx->spinlock, flags);
719 state = ioctx->state;
720 spin_unlock_irqrestore(&ioctx->spinlock, flags);
721 return state;
722}
723
724/**
725 * srpt_set_cmd_state() - Set the state of a SCSI command.
726 *
727 * Does not modify the state of aborted commands. Returns the previous command
728 * state.
729 */
730static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
731 enum srpt_command_state new)
732{
733 enum srpt_command_state previous;
734 unsigned long flags;
735
736 BUG_ON(!ioctx);
737
738 spin_lock_irqsave(&ioctx->spinlock, flags);
739 previous = ioctx->state;
740 if (previous != SRPT_STATE_DONE)
741 ioctx->state = new;
742 spin_unlock_irqrestore(&ioctx->spinlock, flags);
743
744 return previous;
745}
746
747/**
748 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
749 *
750 * Returns true if and only if the previous command state was equal to 'old'.
751 */
752static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
753 enum srpt_command_state old,
754 enum srpt_command_state new)
755{
756 enum srpt_command_state previous;
757 unsigned long flags;
758
759 WARN_ON(!ioctx);
760 WARN_ON(old == SRPT_STATE_DONE);
761 WARN_ON(new == SRPT_STATE_NEW);
762
763 spin_lock_irqsave(&ioctx->spinlock, flags);
764 previous = ioctx->state;
765 if (previous == old)
766 ioctx->state = new;
767 spin_unlock_irqrestore(&ioctx->spinlock, flags);
768 return previous == old;
769}
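
/*
 * Editorial note: the two helpers above mirror the channel-state
 * helpers. SRPT_STATE_DONE is terminal (srpt_set_cmd_state() never
 * overwrites it), and srpt_test_and_set_cmd_state() lets racing
 * completion and abort paths elect exactly one winner for a transition.
 */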
770
771/**
772 * srpt_post_recv() - Post an IB receive request.
773 */
774static int srpt_post_recv(struct srpt_device *sdev,
775 struct srpt_recv_ioctx *ioctx)
776{
777 struct ib_sge list;
778 struct ib_recv_wr wr, *bad_wr;
779
780 BUG_ON(!sdev);
781 wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
782
783 list.addr = ioctx->ioctx.dma;
784 list.length = srp_max_req_size;
785 list.lkey = sdev->mr->lkey;
786
787 wr.next = NULL;
788 wr.sg_list = &list;
789 wr.num_sge = 1;
790
791 return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
792}
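
/*
 * Note: the receive WR above is posted to the device-wide shared
 * receive queue (sdev->srq) rather than to a per-channel QP, which is
 * why this function takes no srpt_rdma_ch argument and uses
 * ib_post_srq_recv().
 */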
793
794/**
795 * srpt_post_send() - Post an IB send request.
796 *
797 * Returns zero upon success and a non-zero value upon failure.
798 */
799static int srpt_post_send(struct srpt_rdma_ch *ch,
800 struct srpt_send_ioctx *ioctx, int len)
801{
802 struct ib_sge list;
803 struct ib_send_wr wr, *bad_wr;
804 struct srpt_device *sdev = ch->sport->sdev;
805 int ret;
806
807 atomic_inc(&ch->req_lim);
808
809 ret = -ENOMEM;
810 if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
811 printk(KERN_WARNING "IB send queue full (needed 1)\n");
812 goto out;
813 }
814
815 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
816 DMA_TO_DEVICE);
817
818 list.addr = ioctx->ioctx.dma;
819 list.length = len;
820 list.lkey = sdev->mr->lkey;
821
822 wr.next = NULL;
823 wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
824 wr.sg_list = &list;
825 wr.num_sge = 1;
826 wr.opcode = IB_WR_SEND;
827 wr.send_flags = IB_SEND_SIGNALED;
828
829 ret = ib_post_send(ch->qp, &wr, &bad_wr);
830
831out:
832 if (ret < 0) {
833 atomic_inc(&ch->sq_wr_avail);
834 atomic_dec(&ch->req_lim);
835 }
836 return ret;
837}
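
/*
 * Note on the accounting above: ch->req_lim (the SRP credit count
 * reported to the initiator) is incremented and ch->sq_wr_avail (free
 * send-queue slots) decremented before posting; on failure both are
 * rolled back, so a failed send never leaks a credit or a send-queue
 * slot.
 */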
838
839/**
840 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
841 * @ioctx: Pointer to the I/O context associated with the request.
842 * @srp_cmd: Pointer to the SRP_CMD request data.
843 * @dir: Pointer to the variable to which the transfer direction will be
844 * written.
845 * @data_len: Pointer to the variable to which the total data length of all
846 * descriptors in the SRP_CMD request will be written.
847 *
848 * This function initializes ioctx->nrbuf and ioctx->r_bufs.
849 *
850 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
851 * -ENOMEM when memory allocation fails and zero upon success.
852 */
853static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
854 struct srp_cmd *srp_cmd,
855 enum dma_data_direction *dir, u64 *data_len)
856{
857 struct srp_indirect_buf *idb;
858 struct srp_direct_buf *db;
859 unsigned add_cdb_offset;
860 int ret;
861
862 /*
863 * The pointer computations below will only be compiled correctly
864 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
865 * whether srp_cmd::add_data has been declared as a byte pointer.
866 */
867 BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
868 && !__same_type(srp_cmd->add_data[0], (u8)0));
869
870 BUG_ON(!dir);
871 BUG_ON(!data_len);
872
873 ret = 0;
874 *data_len = 0;
875
876 /*
877 * The lower four bits of the buffer format field contain the DATA-IN
878 * buffer descriptor format, and the highest four bits contain the
879 * DATA-OUT buffer descriptor format.
880 */
881 *dir = DMA_NONE;
882 if (srp_cmd->buf_fmt & 0xf)
883 /* DATA-IN: transfer data from target to initiator (read). */
884 *dir = DMA_FROM_DEVICE;
885 else if (srp_cmd->buf_fmt >> 4)
886 /* DATA-OUT: transfer data from initiator to target (write). */
887 *dir = DMA_TO_DEVICE;
888
889 /*
890 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
891 * CDB LENGTH' field are reserved and the size in bytes of this field
892 * is four times the value specified in bits 3..7. Hence the "& ~3".
893 */
894 add_cdb_offset = srp_cmd->add_cdb_len & ~3;
895 if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
896 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
897 ioctx->n_rbuf = 1;
898 ioctx->rbufs = &ioctx->single_rbuf;
899
900 db = (struct srp_direct_buf *)(srp_cmd->add_data
901 + add_cdb_offset);
902 memcpy(ioctx->rbufs, db, sizeof *db);
903 *data_len = be32_to_cpu(db->len);
904 } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
905 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
906 idb = (struct srp_indirect_buf *)(srp_cmd->add_data
907 + add_cdb_offset);
908
909 ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
910
911 if (ioctx->n_rbuf >
912 (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
913 printk(KERN_ERR "received unsupported SRP_CMD request"
914 " type (%u out + %u in != %u / %zu)\n",
915 srp_cmd->data_out_desc_cnt,
916 srp_cmd->data_in_desc_cnt,
917 be32_to_cpu(idb->table_desc.len),
918 sizeof(*db));
919 ioctx->n_rbuf = 0;
920 ret = -EINVAL;
921 goto out;
922 }
923
924 if (ioctx->n_rbuf == 1)
925 ioctx->rbufs = &ioctx->single_rbuf;
926 else {
927 ioctx->rbufs =
928 kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
929 if (!ioctx->rbufs) {
930 ioctx->n_rbuf = 0;
931 ret = -ENOMEM;
932 goto out;
933 }
934 }
935
936 db = idb->desc_list;
937 memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
938 *data_len = be32_to_cpu(idb->len);
939 }
940out:
941 return ret;
942}
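
/*
 * Worked example of the buf_fmt decoding above, using the descriptor
 * format codes from <scsi/srp.h> (SRP_DATA_DESC_DIRECT == 1,
 * SRP_DATA_DESC_INDIRECT == 2):
 *   buf_fmt 0x01 -> DATA-IN,  direct descriptor, *dir = DMA_FROM_DEVICE;
 *   buf_fmt 0x20 -> DATA-OUT, indirect table,    *dir = DMA_TO_DEVICE;
 *   buf_fmt 0x00 -> no data buffer,              *dir = DMA_NONE.
 */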
943
944/**
945 * srpt_init_ch_qp() - Initialize queue pair attributes.
946 *
947 * Initializes the attributes of queue pair 'qp' by allowing local write,
948 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
949 */
950static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
951{
952 struct ib_qp_attr *attr;
953 int ret;
954
955 attr = kzalloc(sizeof *attr, GFP_KERNEL);
956 if (!attr)
957 return -ENOMEM;
958
959 attr->qp_state = IB_QPS_INIT;
960 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
961 IB_ACCESS_REMOTE_WRITE;
962 attr->port_num = ch->sport->port;
963 attr->pkey_index = 0;
964
965 ret = ib_modify_qp(qp, attr,
966 IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
967 IB_QP_PKEY_INDEX);
968
969 kfree(attr);
970 return ret;
971}
972
973/**
974 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
975 * @ch: channel of the queue pair.
976 * @qp: queue pair to change the state of.
977 *
978 * Returns zero upon success and a negative value upon failure.
979 *
980 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
981 * If this structure ever becomes larger, it might be necessary to allocate
982 * it dynamically instead of on the stack.
983 */
984static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
985{
986 struct ib_qp_attr qp_attr;
987 int attr_mask;
988 int ret;
989
990 qp_attr.qp_state = IB_QPS_RTR;
991 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
992 if (ret)
993 goto out;
994
995 qp_attr.max_dest_rd_atomic = 4;
996
997 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
998
999out:
1000 return ret;
1001}
1002
1003/**
1004 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
1005 * @ch: channel of the queue pair.
1006 * @qp: queue pair to change the state of.
1007 *
1008 * Returns zero upon success and a negative value upon failure.
1009 *
1010 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
1011 * If this structure ever becomes larger, it might be necessary to allocate
1012 * it dynamically instead of on the stack.
1013 */
1014static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1015{
1016 struct ib_qp_attr qp_attr;
1017 int attr_mask;
1018 int ret;
1019
1020 qp_attr.qp_state = IB_QPS_RTS;
1021 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
1022 if (ret)
1023 goto out;
1024
1025 qp_attr.max_rd_atomic = 4;
1026
1027 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1028
1029out:
1030 return ret;
1031}
1032
1033/**
1034 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
1035 */
1036static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1037{
1038 struct ib_qp_attr qp_attr;
1039
1040 qp_attr.qp_state = IB_QPS_ERR;
1041 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
1042}
1043
1044/**
1045 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
1046 */
1047static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1048 struct srpt_send_ioctx *ioctx)
1049{
1050 struct scatterlist *sg;
1051 enum dma_data_direction dir;
1052
1053 BUG_ON(!ch);
1054 BUG_ON(!ioctx);
1055 BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
1056
1057 while (ioctx->n_rdma)
1058 kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
1059
1060 kfree(ioctx->rdma_ius);
1061 ioctx->rdma_ius = NULL;
1062
1063 if (ioctx->mapped_sg_count) {
1064 sg = ioctx->sg;
1065 WARN_ON(!sg);
1066 dir = ioctx->cmd.data_direction;
1067 BUG_ON(dir == DMA_NONE);
1068 ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
1069 opposite_dma_dir(dir));
1070 ioctx->mapped_sg_count = 0;
1071 }
1072}
1073
1074/**
1075 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
1076 */
1077static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1078 struct srpt_send_ioctx *ioctx)
1079{
1080 struct se_cmd *cmd;
1081 struct scatterlist *sg, *sg_orig;
1082 int sg_cnt;
1083 enum dma_data_direction dir;
1084 struct rdma_iu *riu;
1085 struct srp_direct_buf *db;
1086 dma_addr_t dma_addr;
1087 struct ib_sge *sge;
1088 u64 raddr;
1089 u32 rsize;
1090 u32 tsize;
1091 u32 dma_len;
1092 int count, nrdma;
1093 int i, j, k;
1094
1095 BUG_ON(!ch);
1096 BUG_ON(!ioctx);
1097 cmd = &ioctx->cmd;
1098 dir = cmd->data_direction;
1099 BUG_ON(dir == DMA_NONE);
1100
1101 transport_do_task_sg_chain(cmd);
1102 ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
1103 ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
1104
1105 count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
1106 opposite_dma_dir(dir));
1107 if (unlikely(!count))
1108 return -EAGAIN;
1109
1110 ioctx->mapped_sg_count = count;
1111
1112 if (ioctx->rdma_ius && ioctx->n_rdma_ius)
1113 nrdma = ioctx->n_rdma_ius;
1114 else {
1115 nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
1116 + ioctx->n_rbuf;
1117
1118 ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
1119 if (!ioctx->rdma_ius)
1120 goto free_mem;
1121
1122 ioctx->n_rdma_ius = nrdma;
1123 }
1124
1125 db = ioctx->rbufs;
1126 tsize = cmd->data_length;
1127 dma_len = sg_dma_len(&sg[0]);
1128 riu = ioctx->rdma_ius;
1129
	/*
	 * For each remote descriptor, compute the number of ib_sge entries
	 * that is needed. If at most SRPT_DEF_SG_PER_WQE ib_sge entries are
	 * needed per RDMA operation, a single rdma_iu, and hence a single
	 * RDMA work request, suffices for that descriptor. Otherwise,
	 * allocate extra rdma_iu structures to carry the additional ib_sge
	 * entries in further RDMA work requests.
	 */
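	/*
	 * Worked example, assuming SRPT_DEF_SG_PER_WQE equals 16: a remote
	 * descriptor that spans 20 mapped SG entries consumes two rdma_iu
	 * structures and hence two RDMA work requests. The first rdma_iu
	 * carries 16 ib_sge entries and the second one carries the
	 * remaining four.
	 */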
1138 for (i = 0, j = 0;
1139 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1140 rsize = be32_to_cpu(db->len);
1141 raddr = be64_to_cpu(db->va);
1142 riu->raddr = raddr;
1143 riu->rkey = be32_to_cpu(db->key);
1144 riu->sge_cnt = 0;
1145
1146 /* calculate how many sge required for this remote_buf */
1147 while (rsize > 0 && tsize > 0) {
1148
1149 if (rsize >= dma_len) {
1150 tsize -= dma_len;
1151 rsize -= dma_len;
1152 raddr += dma_len;
1153
1154 if (tsize > 0) {
1155 ++j;
1156 if (j < count) {
1157 sg = sg_next(sg);
1158 dma_len = sg_dma_len(sg);
1159 }
1160 }
1161 } else {
1162 tsize -= rsize;
1163 dma_len -= rsize;
1164 rsize = 0;
1165 }
1166
1167 ++riu->sge_cnt;
1168
1169 if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
1170 ++ioctx->n_rdma;
1171 riu->sge =
1172 kmalloc(riu->sge_cnt * sizeof *riu->sge,
1173 GFP_KERNEL);
1174 if (!riu->sge)
1175 goto free_mem;
1176
1177 ++riu;
1178 riu->sge_cnt = 0;
1179 riu->raddr = raddr;
1180 riu->rkey = be32_to_cpu(db->key);
1181 }
1182 }
1183
1184 ++ioctx->n_rdma;
1185 riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
1186 GFP_KERNEL);
1187 if (!riu->sge)
1188 goto free_mem;
1189 }
1190
1191 db = ioctx->rbufs;
1192 tsize = cmd->data_length;
1193 riu = ioctx->rdma_ius;
1194 sg = sg_orig;
1195 dma_len = sg_dma_len(&sg[0]);
1196 dma_addr = sg_dma_address(&sg[0]);
1197
	/*
	 * The second loop maps the DMA address of each element of the SG
	 * list onto the ib_sge arrays that were allocated above.
	 */
1199 for (i = 0, j = 0;
1200 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1201 rsize = be32_to_cpu(db->len);
1202 sge = riu->sge;
1203 k = 0;
1204
1205 while (rsize > 0 && tsize > 0) {
1206 sge->addr = dma_addr;
1207 sge->lkey = ch->sport->sdev->mr->lkey;
1208
1209 if (rsize >= dma_len) {
1210 sge->length =
1211 (tsize < dma_len) ? tsize : dma_len;
1212 tsize -= dma_len;
1213 rsize -= dma_len;
1214
1215 if (tsize > 0) {
1216 ++j;
1217 if (j < count) {
1218 sg = sg_next(sg);
1219 dma_len = sg_dma_len(sg);
1220 dma_addr = sg_dma_address(sg);
1221 }
1222 }
1223 } else {
1224 sge->length = (tsize < rsize) ? tsize : rsize;
1225 tsize -= rsize;
1226 dma_len -= rsize;
1227 dma_addr += rsize;
1228 rsize = 0;
1229 }
1230
1231 ++k;
1232 if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
1233 ++riu;
1234 sge = riu->sge;
1235 k = 0;
1236 } else if (rsize > 0 && tsize > 0)
1237 ++sge;
1238 }
1239 }
1240
1241 return 0;
1242
1243free_mem:
1244 srpt_unmap_sg_to_ib_sge(ch, ioctx);
1245
1246 return -ENOMEM;
1247}
1248
1249/**
1250 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
1251 */
1252static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1253{
1254 struct srpt_send_ioctx *ioctx;
1255 unsigned long flags;
1256
1257 BUG_ON(!ch);
1258
1259 ioctx = NULL;
1260 spin_lock_irqsave(&ch->spinlock, flags);
1261 if (!list_empty(&ch->free_list)) {
1262 ioctx = list_first_entry(&ch->free_list,
1263 struct srpt_send_ioctx, free_list);
1264 list_del(&ioctx->free_list);
1265 }
1266 spin_unlock_irqrestore(&ch->spinlock, flags);
1267
1268 if (!ioctx)
1269 return ioctx;
1270
1271 BUG_ON(ioctx->ch != ch);
1272 kref_init(&ioctx->kref);
1273 spin_lock_init(&ioctx->spinlock);
1274 ioctx->state = SRPT_STATE_NEW;
1275 ioctx->n_rbuf = 0;
1276 ioctx->rbufs = NULL;
1277 ioctx->n_rdma = 0;
1278 ioctx->n_rdma_ius = 0;
1279 ioctx->rdma_ius = NULL;
1280 ioctx->mapped_sg_count = 0;
1281 init_completion(&ioctx->tx_done);
1282 ioctx->queue_status_only = false;
1283 /*
1284 * transport_init_se_cmd() does not initialize all fields, so do it
1285 * here.
1286 */
1287 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1288 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1289
1290 return ioctx;
1291}
1292
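/*
 * Life cycle of the reference count of a send I/O context:
 * srpt_get_send_ioctx() initializes the kref to one; srpt_handle_cmd() and
 * srpt_handle_tsk_mgmt() take an additional reference before handing a
 * command over to the target core; references are dropped from
 * srpt_check_stop_free(), from the send completion handler and from the
 * abort path. The final kref_put() invokes srpt_put_send_ioctx_kref(),
 * which returns the context to ch->free_list.
 */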
1293/**
1294 * srpt_put_send_ioctx() - Free up resources.
1295 */
1296static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
1297{
1298 struct srpt_rdma_ch *ch;
1299 unsigned long flags;
1300
1301 BUG_ON(!ioctx);
1302 ch = ioctx->ch;
1303 BUG_ON(!ch);
1304
1305 WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
1306
1307 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1308 transport_generic_free_cmd(&ioctx->cmd, 0);
1309
1310 if (ioctx->n_rbuf > 1) {
1311 kfree(ioctx->rbufs);
1312 ioctx->rbufs = NULL;
1313 ioctx->n_rbuf = 0;
1314 }
1315
1316 spin_lock_irqsave(&ch->spinlock, flags);
1317 list_add(&ioctx->free_list, &ch->free_list);
1318 spin_unlock_irqrestore(&ch->spinlock, flags);
1319}
1320
1321static void srpt_put_send_ioctx_kref(struct kref *kref)
1322{
1323 srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
1324}
1325
/**
 * srpt_abort_cmd() - Abort a SCSI command.
 * @ioctx: I/O context associated with the SCSI command.
 *
 * Returns the state the command was in before abort processing started.
 */
1331static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1332{
1333 enum srpt_command_state state;
1334 unsigned long flags;
1335
1336 BUG_ON(!ioctx);
1337
1338 /*
1339 * If the command is in a state where the target core is waiting for
1340 * the ib_srpt driver, change the state to the next state. Changing
1341 * the state of the command from SRPT_STATE_NEED_DATA to
1342 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
1343 * function a second time.
1344 */
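	/*
	 * State transitions performed by the switch statement below:
	 *
	 *   SRPT_STATE_NEED_DATA     -> SRPT_STATE_DATA_IN
	 *   SRPT_STATE_DATA_IN       -> SRPT_STATE_DONE
	 *   SRPT_STATE_CMD_RSP_SENT  -> SRPT_STATE_DONE
	 *   SRPT_STATE_MGMT_RSP_SENT -> SRPT_STATE_DONE
	 *   any other state          -> unchanged
	 */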
1345
1346 spin_lock_irqsave(&ioctx->spinlock, flags);
1347 state = ioctx->state;
1348 switch (state) {
1349 case SRPT_STATE_NEED_DATA:
1350 ioctx->state = SRPT_STATE_DATA_IN;
1351 break;
1352 case SRPT_STATE_DATA_IN:
1353 case SRPT_STATE_CMD_RSP_SENT:
1354 case SRPT_STATE_MGMT_RSP_SENT:
1355 ioctx->state = SRPT_STATE_DONE;
1356 break;
1357 default:
1358 break;
1359 }
1360 spin_unlock_irqrestore(&ioctx->spinlock, flags);
1361
1362 if (state == SRPT_STATE_DONE)
1363 goto out;
1364
1365 pr_debug("Aborting cmd with state %d and tag %lld\n", state,
1366 ioctx->tag);
1367
1368 switch (state) {
1369 case SRPT_STATE_NEW:
1370 case SRPT_STATE_DATA_IN:
1371 case SRPT_STATE_MGMT:
1372 /*
1373 * Do nothing - defer abort processing until
1374 * srpt_queue_response() is invoked.
1375 */
1376 WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
1377 break;
1378 case SRPT_STATE_NEED_DATA:
1379 /* DMA_TO_DEVICE (write) - RDMA read error. */
1380 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1381 transport_generic_handle_data(&ioctx->cmd);
1382 break;
1383 case SRPT_STATE_CMD_RSP_SENT:
1384 /*
1385 * SRP_RSP sending failed or the SRP_RSP send completion has
1386 * not been received in time.
1387 */
1388 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1389 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1390 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1391 break;
1392 case SRPT_STATE_MGMT_RSP_SENT:
1393 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1394 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1395 break;
1396 default:
		WARN(true, "Unexpected command state %d\n", state);
1398 break;
1399 }
1400
1401out:
1402 return state;
1403}
1404
1405/**
1406 * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
1407 */
1408static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
1409{
1410 struct srpt_send_ioctx *ioctx;
1411 enum srpt_command_state state;
1412 struct se_cmd *cmd;
1413 u32 index;
1414
1415 atomic_inc(&ch->sq_wr_avail);
1416
1417 index = idx_from_wr_id(wr_id);
1418 ioctx = ch->ioctx_ring[index];
1419 state = srpt_get_cmd_state(ioctx);
1420 cmd = &ioctx->cmd;
1421
1422 WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
1423 && state != SRPT_STATE_MGMT_RSP_SENT
1424 && state != SRPT_STATE_NEED_DATA
1425 && state != SRPT_STATE_DONE);
1426
1427 /* If SRP_RSP sending failed, undo the ch->req_lim change. */
1428 if (state == SRPT_STATE_CMD_RSP_SENT
1429 || state == SRPT_STATE_MGMT_RSP_SENT)
1430 atomic_dec(&ch->req_lim);
1431
1432 srpt_abort_cmd(ioctx);
1433}
1434
1435/**
1436 * srpt_handle_send_comp() - Process an IB send completion notification.
1437 */
1438static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
1439 struct srpt_send_ioctx *ioctx)
1440{
1441 enum srpt_command_state state;
1442
1443 atomic_inc(&ch->sq_wr_avail);
1444
1445 state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1446
1447 if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
1448 && state != SRPT_STATE_MGMT_RSP_SENT
1449 && state != SRPT_STATE_DONE))
1450 pr_debug("state = %d\n", state);
1451
1452 if (state != SRPT_STATE_DONE)
1453 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1454 else
		printk(KERN_ERR "IB completion has been received too late for"
		       " I/O context index %u.\n", ioctx->ioctx.index);
1457}
1458
1459/**
1460 * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
1461 *
1462 * Note: transport_generic_handle_data() is asynchronous so unmapping the
1463 * data that has been transferred via IB RDMA must be postponed until the
1464 * check_stop_free() callback.
1465 */
1466static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
1467 struct srpt_send_ioctx *ioctx,
1468 enum srpt_opcode opcode)
1469{
1470 WARN_ON(ioctx->n_rdma <= 0);
1471 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1472
1473 if (opcode == SRPT_RDMA_READ_LAST) {
1474 if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1475 SRPT_STATE_DATA_IN))
1476 transport_generic_handle_data(&ioctx->cmd);
1477 else
1478 printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
1479 __LINE__, srpt_get_cmd_state(ioctx));
1480 } else if (opcode == SRPT_RDMA_ABORT) {
1481 ioctx->rdma_aborted = true;
1482 } else {
1483 WARN(true, "unexpected opcode %d\n", opcode);
1484 }
1485}
1486
1487/**
1488 * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
1489 */
1490static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
1491 struct srpt_send_ioctx *ioctx,
1492 enum srpt_opcode opcode)
1493{
1494 struct se_cmd *cmd;
1495 enum srpt_command_state state;
1496
1497 cmd = &ioctx->cmd;
1498 state = srpt_get_cmd_state(ioctx);
1499 switch (opcode) {
1500 case SRPT_RDMA_READ_LAST:
1501 if (ioctx->n_rdma <= 0) {
1502 printk(KERN_ERR "Received invalid RDMA read"
1503 " error completion with idx %d\n",
1504 ioctx->ioctx.index);
1505 break;
1506 }
1507 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1508 if (state == SRPT_STATE_NEED_DATA)
1509 srpt_abort_cmd(ioctx);
1510 else
1511 printk(KERN_ERR "%s[%d]: wrong state = %d\n",
1512 __func__, __LINE__, state);
1513 break;
1514 case SRPT_RDMA_WRITE_LAST:
1515 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1516 break;
1517 default:
1518 printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
1519 __LINE__, opcode);
1520 break;
1521 }
1522}
1523
1524/**
1525 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
1526 * @ch: RDMA channel through which the request has been received.
1527 * @ioctx: I/O context associated with the SRP_CMD request. The response will
1528 * be built in the buffer ioctx->buf points at and hence this function will
1529 * overwrite the request data.
1530 * @tag: tag of the request for which this response is being generated.
1531 * @status: value for the STATUS field of the SRP_RSP information unit.
1532 *
1533 * Returns the size in bytes of the SRP_RSP response.
1534 *
1535 * An SRP_RSP response contains a SCSI status or service response. See also
1536 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1537 * response. See also SPC-2 for more information about sense data.
1538 */
1539static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1540 struct srpt_send_ioctx *ioctx, u64 tag,
1541 int status)
1542{
1543 struct srp_rsp *srp_rsp;
1544 const u8 *sense_data;
1545 int sense_data_len, max_sense_len;
1546
1547 /*
1548 * The lowest bit of all SAM-3 status codes is zero (see also
1549 * paragraph 5.3 in SAM-3).
1550 */
1551 WARN_ON(status & 1);
1552
1553 srp_rsp = ioctx->ioctx.buf;
1554 BUG_ON(!srp_rsp);
1555
1556 sense_data = ioctx->sense_data;
1557 sense_data_len = ioctx->cmd.scsi_sense_length;
1558 WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1559
1560 memset(srp_rsp, 0, sizeof *srp_rsp);
1561 srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
	    cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1564 srp_rsp->tag = tag;
1565 srp_rsp->status = status;
1566
1567 if (sense_data_len) {
1568 BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
1569 max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1570 if (sense_data_len > max_sense_len) {
1571 printk(KERN_WARNING "truncated sense data from %d to %d"
1572 " bytes\n", sense_data_len, max_sense_len);
1573 sense_data_len = max_sense_len;
1574 }
1575
1576 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1577 srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1578 memcpy(srp_rsp + 1, sense_data, sense_data_len);
1579 }
1580
1581 return sizeof(*srp_rsp) + sense_data_len;
1582}
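/*
 * Example of the credit mechanism implemented above: each SRP_RSP returns at
 * least one request credit to the initiator (the "1 +" term) plus any
 * credits that accumulated in ch->req_lim_delta since the previous response,
 * e.g. for requests that were aborted without sending a response. Sense
 * data, if present, is copied directly behind the fixed-size srp_rsp header,
 * at srp_rsp + 1.
 */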
1583
1584/**
1585 * srpt_build_tskmgmt_rsp() - Build a task management response.
1586 * @ch: RDMA channel through which the request has been received.
1587 * @ioctx: I/O context in which the SRP_RSP response will be built.
1588 * @rsp_code: RSP_CODE that will be stored in the response.
1589 * @tag: Tag of the request for which this response is being generated.
1590 *
1591 * Returns the size in bytes of the SRP_RSP response.
1592 *
1593 * An SRP_RSP response contains a SCSI status or service response. See also
1594 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1595 * response.
1596 */
1597static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1598 struct srpt_send_ioctx *ioctx,
1599 u8 rsp_code, u64 tag)
1600{
1601 struct srp_rsp *srp_rsp;
1602 int resp_data_len;
1603 int resp_len;
1604
1605 resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
1606 resp_len = sizeof(*srp_rsp) + resp_data_len;
1607
1608 srp_rsp = ioctx->ioctx.buf;
1609 BUG_ON(!srp_rsp);
1610 memset(srp_rsp, 0, sizeof *srp_rsp);
1611
1612 srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta = cpu_to_be32(1
		+ atomic_xchg(&ch->req_lim_delta, 0));
1615 srp_rsp->tag = tag;
1616
1617 if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
1618 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1619 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1620 srp_rsp->data[3] = rsp_code;
1621 }
1622
1623 return resp_len;
1624}
1625
1626#define NO_SUCH_LUN ((uint64_t)-1LL)
1627
1628/*
1629 * SCSI LUN addressing method. See also SAM-2 and the section about
1630 * eight byte LUNs.
1631 */
1632enum scsi_lun_addr_method {
1633 SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
1634 SCSI_LUN_ADDR_METHOD_FLAT = 1,
1635 SCSI_LUN_ADDR_METHOD_LUN = 2,
1636 SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
1637};
1638
1639/*
1640 * srpt_unpack_lun() - Convert from network LUN to linear LUN.
1641 *
 * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
1643 * order (big endian) to a linear LUN. Supports three LUN addressing methods:
1644 * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
1645 */
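/*
 * Examples: the two-byte LUN 0x0001 uses the peripheral addressing method
 * (the top two bits of byte 0 are 00) and unpacks to LUN 1. The two-byte
 * LUN 0x4002 uses the flat addressing method (top two bits 01) and unpacks
 * to LUN 2, since the lower six bits of byte 0 concatenated with byte 1
 * form the LUN value.
 */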
1646static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
1647{
1648 uint64_t res = NO_SUCH_LUN;
1649 int addressing_method;
1650
1651 if (unlikely(len < 2)) {
		printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or"
		       " more\n", len);
1654 goto out;
1655 }
1656
1657 switch (len) {
1658 case 8:
1659 if ((*((__be64 *)lun) &
1660 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1661 goto out_err;
1662 break;
1663 case 4:
1664 if (*((__be16 *)&lun[2]) != 0)
1665 goto out_err;
1666 break;
1667 case 6:
1668 if (*((__be32 *)&lun[2]) != 0)
1669 goto out_err;
1670 break;
1671 case 2:
1672 break;
1673 default:
1674 goto out_err;
1675 }
1676
1677 addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
1678 switch (addressing_method) {
1679 case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
1680 case SCSI_LUN_ADDR_METHOD_FLAT:
1681 case SCSI_LUN_ADDR_METHOD_LUN:
1682 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1683 break;
1684
1685 case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
1686 default:
		printk(KERN_ERR "Unimplemented LUN addressing method %u\n",
		       addressing_method);
1689 break;
1690 }
1691
1692out:
1693 return res;
1694
1695out_err:
	printk(KERN_ERR "Support for multi-level LUNs has not yet been"
	       " implemented\n");
1698 goto out;
1699}
1700
1701static int srpt_check_stop_free(struct se_cmd *cmd)
1702{
1703 struct srpt_send_ioctx *ioctx;
1704
1705 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
1706 return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1707}
1708
1709/**
1710 * srpt_handle_cmd() - Process SRP_CMD.
1711 */
1712static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1713 struct srpt_recv_ioctx *recv_ioctx,
1714 struct srpt_send_ioctx *send_ioctx)
1715{
1716 struct se_cmd *cmd;
1717 struct srp_cmd *srp_cmd;
1718 uint64_t unpacked_lun;
1719 u64 data_len;
1720 enum dma_data_direction dir;
1721 int ret;
1722
1723 BUG_ON(!send_ioctx);
1724
1725 srp_cmd = recv_ioctx->ioctx.buf;
1726 kref_get(&send_ioctx->kref);
1727 cmd = &send_ioctx->cmd;
1728 send_ioctx->tag = srp_cmd->tag;
1729
1730 switch (srp_cmd->task_attr) {
1731 case SRP_CMD_SIMPLE_Q:
1732 cmd->sam_task_attr = MSG_SIMPLE_TAG;
1733 break;
1734 case SRP_CMD_ORDERED_Q:
1735 default:
1736 cmd->sam_task_attr = MSG_ORDERED_TAG;
1737 break;
1738 case SRP_CMD_HEAD_OF_Q:
1739 cmd->sam_task_attr = MSG_HEAD_TAG;
1740 break;
1741 case SRP_CMD_ACA:
1742 cmd->sam_task_attr = MSG_ACA_TAG;
1743 break;
1744 }
1745
1746 ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
1747 if (ret) {
1748 printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
1749 srp_cmd->tag);
1750 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1751 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1752 goto send_sense;
1753 }
1754
1755 cmd->data_length = data_len;
1756 cmd->data_direction = dir;
1757 unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
1758 sizeof(srp_cmd->lun));
1759 if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
1760 goto send_sense;
1761 ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
1762 if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
1763 srpt_queue_status(cmd);
1764 else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
1765 goto send_sense;
1766 else
1767 WARN_ON_ONCE(ret);
1768
1769 transport_handle_cdb_direct(cmd);
1770 return 0;
1771
1772send_sense:
1773 transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
1774 0);
1775 return -1;
1776}
1777
/**
 * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
 * @ioctx: I/O context of the task management request.
 * @tag: Tag of the SRP task management request.
 *
 * Returns zero if a command with the given tag has been found that is not
 * yet in the SRPT_STATE_DONE state, and -EINVAL otherwise.
 *
 * Note: It is assumed that the initiator serializes tag-based task management
 * requests.
 */
1791static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
1792{
1793 struct srpt_device *sdev;
1794 struct srpt_rdma_ch *ch;
1795 struct srpt_send_ioctx *target;
1796 int ret, i;
1797
1798 ret = -EINVAL;
1799 ch = ioctx->ch;
1800 BUG_ON(!ch);
1801 BUG_ON(!ch->sport);
1802 sdev = ch->sport->sdev;
1803 BUG_ON(!sdev);
1804 spin_lock_irq(&sdev->spinlock);
1805 for (i = 0; i < ch->rq_size; ++i) {
1806 target = ch->ioctx_ring[i];
1807 if (target->cmd.se_lun == ioctx->cmd.se_lun &&
1808 target->tag == tag &&
1809 srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
1810 ret = 0;
1811 /* now let the target core abort &target->cmd; */
1812 break;
1813 }
1814 }
1815 spin_unlock_irq(&sdev->spinlock);
1816 return ret;
1817}
1818
1819static int srp_tmr_to_tcm(int fn)
1820{
1821 switch (fn) {
1822 case SRP_TSK_ABORT_TASK:
1823 return TMR_ABORT_TASK;
1824 case SRP_TSK_ABORT_TASK_SET:
1825 return TMR_ABORT_TASK_SET;
1826 case SRP_TSK_CLEAR_TASK_SET:
1827 return TMR_CLEAR_TASK_SET;
1828 case SRP_TSK_LUN_RESET:
1829 return TMR_LUN_RESET;
1830 case SRP_TSK_CLEAR_ACA:
1831 return TMR_CLEAR_ACA;
1832 default:
1833 return -1;
1834 }
1835}
1836
/**
 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
 *
 * Either hands the task management request over to the target core or, upon
 * failure, sends a CHECK CONDITION response back to the initiator.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
1845static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1846 struct srpt_recv_ioctx *recv_ioctx,
1847 struct srpt_send_ioctx *send_ioctx)
1848{
1849 struct srp_tsk_mgmt *srp_tsk;
1850 struct se_cmd *cmd;
1851 uint64_t unpacked_lun;
1852 int tcm_tmr;
1853 int res;
1854
1855 BUG_ON(!send_ioctx);
1856
1857 srp_tsk = recv_ioctx->ioctx.buf;
1858 cmd = &send_ioctx->cmd;
1859
1860 pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
1861 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
1862 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
1863
1864 srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
1865 send_ioctx->tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	/*
	 * Allocate the TMR request before the validity check below such
	 * that a response code can be stored through cmd->se_tmr_req
	 * without dereferencing a NULL pointer.
	 */
	cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
	if (!cmd->se_tmr_req) {
		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		goto process_tmr;
	}
	if (tcm_tmr < 0) {
		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		send_ioctx->cmd.se_tmr_req->response =
			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		goto process_tmr;
	}
1879
1880 unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
1881 sizeof(srp_tsk->lun));
1882 res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
1883 if (res) {
1884 pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
1885 send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1886 send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1887 goto process_tmr;
1888 }
1889
1890 if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
1891 srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
1892
1893process_tmr:
1894 kref_get(&send_ioctx->kref);
1895 if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
1896 transport_generic_handle_tmr(&send_ioctx->cmd);
1897 else
1898 transport_send_check_condition_and_sense(cmd,
1899 cmd->scsi_sense_reason, 0);
1900
1901}
1902
/**
 * srpt_handle_new_iu() - Process a newly received information unit.
 * @ch:         RDMA channel through which the information unit has been
 *              received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 * @send_ioctx: Send I/O context to use for the response, or NULL to allocate
 *              one from the channel free list.
 */
1908static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1909 struct srpt_recv_ioctx *recv_ioctx,
1910 struct srpt_send_ioctx *send_ioctx)
1911{
1912 struct srp_cmd *srp_cmd;
1913 enum rdma_ch_state ch_state;
1914
1915 BUG_ON(!ch);
1916 BUG_ON(!recv_ioctx);
1917
1918 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1919 recv_ioctx->ioctx.dma, srp_max_req_size,
1920 DMA_FROM_DEVICE);
1921
1922 ch_state = srpt_get_ch_state(ch);
1923 if (unlikely(ch_state == CH_CONNECTING)) {
1924 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1925 goto out;
1926 }
1927
1928 if (unlikely(ch_state != CH_LIVE))
1929 goto out;
1930
1931 srp_cmd = recv_ioctx->ioctx.buf;
	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
		if (!send_ioctx)
			send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx)) {
			list_add_tail(&recv_ioctx->wait_list,
				      &ch->cmd_wait_list);
			goto out;
		}
		/*
		 * Only initialize the SE command if a send context is
		 * available, i.e. for SRP_CMD and SRP_TSK_MGMT. For all
		 * other opcodes send_ioctx may be NULL.
		 */
		transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops,
				      ch->sess, 0, DMA_NONE, MSG_SIMPLE_TAG,
				      send_ioctx->sense_data);
	}
1945
1946 switch (srp_cmd->opcode) {
1947 case SRP_CMD:
1948 srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1949 break;
1950 case SRP_TSK_MGMT:
1951 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1952 break;
1953 case SRP_I_LOGOUT:
1954 printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
1955 break;
1956 case SRP_CRED_RSP:
1957 pr_debug("received SRP_CRED_RSP\n");
1958 break;
1959 case SRP_AER_RSP:
1960 pr_debug("received SRP_AER_RSP\n");
1961 break;
1962 case SRP_RSP:
1963 printk(KERN_ERR "Received SRP_RSP\n");
1964 break;
1965 default:
1966 printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
1967 srp_cmd->opcode);
1968 break;
1969 }
1970
1971 srpt_post_recv(ch->sport->sdev, recv_ioctx);
1972out:
1973 return;
1974}
1975
1976static void srpt_process_rcv_completion(struct ib_cq *cq,
1977 struct srpt_rdma_ch *ch,
1978 struct ib_wc *wc)
1979{
1980 struct srpt_device *sdev = ch->sport->sdev;
1981 struct srpt_recv_ioctx *ioctx;
1982 u32 index;
1983
1984 index = idx_from_wr_id(wc->wr_id);
1985 if (wc->status == IB_WC_SUCCESS) {
1986 int req_lim;
1987
1988 req_lim = atomic_dec_return(&ch->req_lim);
1989 if (unlikely(req_lim < 0))
1990 printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
1991 ioctx = sdev->ioctx_ring[index];
1992 srpt_handle_new_iu(ch, ioctx, NULL);
1993 } else {
1994 printk(KERN_INFO "receiving failed for idx %u with status %d\n",
1995 index, wc->status);
1996 }
1997}
1998
1999/**
2000 * srpt_process_send_completion() - Process an IB send completion.
2001 *
2002 * Note: Although this has not yet been observed during tests, at least in
2003 * theory it is possible that the srpt_get_send_ioctx() call invoked by
2004 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
2005 * value in each response is set to one, and it is possible that this response
2006 * makes the initiator send a new request before the send completion for that
2007 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
2009 * if IB retransmission causes generation of the send completion to be
2010 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
2011 * are queued on cmd_wait_list. The code below processes these delayed
2012 * requests one at a time.
2013 */
2014static void srpt_process_send_completion(struct ib_cq *cq,
2015 struct srpt_rdma_ch *ch,
2016 struct ib_wc *wc)
2017{
2018 struct srpt_send_ioctx *send_ioctx;
2019 uint32_t index;
2020 enum srpt_opcode opcode;
2021
2022 index = idx_from_wr_id(wc->wr_id);
2023 opcode = opcode_from_wr_id(wc->wr_id);
2024 send_ioctx = ch->ioctx_ring[index];
2025 if (wc->status == IB_WC_SUCCESS) {
2026 if (opcode == SRPT_SEND)
2027 srpt_handle_send_comp(ch, send_ioctx);
2028 else {
2029 WARN_ON(opcode != SRPT_RDMA_ABORT &&
2030 wc->opcode != IB_WC_RDMA_READ);
2031 srpt_handle_rdma_comp(ch, send_ioctx, opcode);
2032 }
2033 } else {
2034 if (opcode == SRPT_SEND) {
2035 printk(KERN_INFO "sending response for idx %u failed"
2036 " with status %d\n", index, wc->status);
2037 srpt_handle_send_err_comp(ch, wc->wr_id);
2038 } else if (opcode != SRPT_RDMA_MID) {
			printk(KERN_INFO "RDMA type %d for idx %u failed with"
			       " status %d\n", opcode, index, wc->status);
2041 srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
2042 }
2043 }
2044
2045 while (unlikely(opcode == SRPT_SEND
2046 && !list_empty(&ch->cmd_wait_list)
2047 && srpt_get_ch_state(ch) == CH_LIVE
2048 && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
2049 struct srpt_recv_ioctx *recv_ioctx;
2050
2051 recv_ioctx = list_first_entry(&ch->cmd_wait_list,
2052 struct srpt_recv_ioctx,
2053 wait_list);
2054 list_del(&recv_ioctx->wait_list);
2055 srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
2056 }
2057}
2058
2059static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
2060{
2061 struct ib_wc *const wc = ch->wc;
2062 int i, n;
2063
2064 WARN_ON(cq != ch->cq);
2065
2066 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2067 while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
2068 for (i = 0; i < n; i++) {
2069 if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
2070 srpt_process_rcv_completion(cq, ch, &wc[i]);
2071 else
2072 srpt_process_send_completion(cq, ch, &wc[i]);
2073 }
2074 }
2075}
2076
2077/**
2078 * srpt_completion() - IB completion queue callback function.
2079 *
2080 * Notes:
2081 * - It is guaranteed that a completion handler will never be invoked
2082 * concurrently on two different CPUs for the same completion queue. See also
2083 * Documentation/infiniband/core_locking.txt and the implementation of
2084 * handle_edge_irq() in kernel/irq/chip.c.
2085 * - When threaded IRQs are enabled, completion handlers are invoked in thread
2086 * context instead of interrupt context.
2087 */
2088static void srpt_completion(struct ib_cq *cq, void *ctx)
2089{
2090 struct srpt_rdma_ch *ch = ctx;
2091
2092 wake_up_interruptible(&ch->wait_queue);
2093}
2094
2095static int srpt_compl_thread(void *arg)
2096{
2097 struct srpt_rdma_ch *ch;
2098
2099 /* Hibernation / freezing of the SRPT kernel thread is not supported. */
2100 current->flags |= PF_NOFREEZE;
2101
2102 ch = arg;
2103 BUG_ON(!ch);
2104 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
2105 ch->sess_name, ch->thread->comm, current->pid);
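	/*
	 * The comma expression in the wait condition below makes
	 * wait_event_interruptible() poll and process the completion queue
	 * on every wakeup triggered by srpt_completion(), while only
	 * kthread_should_stop() determines when the wait finishes.
	 */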
2106 while (!kthread_should_stop()) {
2107 wait_event_interruptible(ch->wait_queue,
2108 (srpt_process_completion(ch->cq, ch),
2109 kthread_should_stop()));
2110 }
2111 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
2112 ch->sess_name, ch->thread->comm, current->pid);
2113 return 0;
2114}
2115
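/*
 * The receive and send side of a channel share a single completion queue,
 * so that queue is sized for ch->rq_size receive completions plus
 * srp_sq_size send completions.
 */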
2116/**
2117 * srpt_create_ch_ib() - Create receive and send completion queues.
2118 */
2119static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2120{
2121 struct ib_qp_init_attr *qp_init;
2122 struct srpt_port *sport = ch->sport;
2123 struct srpt_device *sdev = sport->sdev;
2124 u32 srp_sq_size = sport->port_attrib.srp_sq_size;
2125 int ret;
2126
2127 WARN_ON(ch->rq_size < 1);
2128
2129 ret = -ENOMEM;
2130 qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
2131 if (!qp_init)
2132 goto out;
2133
2134 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
2135 ch->rq_size + srp_sq_size, 0);
2136 if (IS_ERR(ch->cq)) {
2137 ret = PTR_ERR(ch->cq);
2138 printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
2139 ch->rq_size + srp_sq_size, ret);
2140 goto out;
2141 }
2142
2143 qp_init->qp_context = (void *)ch;
2144 qp_init->event_handler
2145 = (void(*)(struct ib_event *, void*))srpt_qp_event;
2146 qp_init->send_cq = ch->cq;
2147 qp_init->recv_cq = ch->cq;
2148 qp_init->srq = sdev->srq;
2149 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
2150 qp_init->qp_type = IB_QPT_RC;
2151 qp_init->cap.max_send_wr = srp_sq_size;
2152 qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
2153
2154 ch->qp = ib_create_qp(sdev->pd, qp_init);
2155 if (IS_ERR(ch->qp)) {
2156 ret = PTR_ERR(ch->qp);
2157 printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
2158 goto err_destroy_cq;
2159 }
2160
2161 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
2162
2163 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
2164 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
2165 qp_init->cap.max_send_wr, ch->cm_id);
2166
2167 ret = srpt_init_ch_qp(ch, ch->qp);
2168 if (ret)
2169 goto err_destroy_qp;
2170
2171 init_waitqueue_head(&ch->wait_queue);
2172
2173 pr_debug("creating thread for session %s\n", ch->sess_name);
2174
2175 ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
2176 if (IS_ERR(ch->thread)) {
2177 printk(KERN_ERR "failed to create kernel thread %ld\n",
2178 PTR_ERR(ch->thread));
2179 ch->thread = NULL;
2180 goto err_destroy_qp;
2181 }
2182
2183out:
2184 kfree(qp_init);
2185 return ret;
2186
2187err_destroy_qp:
2188 ib_destroy_qp(ch->qp);
2189err_destroy_cq:
2190 ib_destroy_cq(ch->cq);
2191 goto out;
2192}
2193
2194static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
2195{
2196 if (ch->thread)
2197 kthread_stop(ch->thread);
2198
2199 ib_destroy_qp(ch->qp);
2200 ib_destroy_cq(ch->cq);
2201}
2202
/**
 * __srpt_close_ch() - Initiate closing of an RDMA channel.
 *
 * Change the channel state to CH_DISCONNECTING and send a CM REJ or DREQ to
 * the initiator such that all resources associated with the channel will be
 * deallocated at an appropriate time.
 *
 * Note: The caller must hold ch->sport->sdev->spinlock.
 */
2211static void __srpt_close_ch(struct srpt_rdma_ch *ch)
2212{
2213 struct srpt_device *sdev;
2214 enum rdma_ch_state prev_state;
2215 unsigned long flags;
2216
2217 sdev = ch->sport->sdev;
2218
2219 spin_lock_irqsave(&ch->spinlock, flags);
2220 prev_state = ch->state;
2221 switch (prev_state) {
2222 case CH_CONNECTING:
2223 case CH_LIVE:
2224 ch->state = CH_DISCONNECTING;
2225 break;
2226 default:
2227 break;
2228 }
2229 spin_unlock_irqrestore(&ch->spinlock, flags);
2230
2231 switch (prev_state) {
2232 case CH_CONNECTING:
2233 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
2234 NULL, 0);
2235 /* fall through */
2236 case CH_LIVE:
2237 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
2238 printk(KERN_ERR "sending CM DREQ failed.\n");
2239 break;
2240 case CH_DISCONNECTING:
2241 break;
2242 case CH_DRAINING:
2243 case CH_RELEASING:
2244 break;
2245 }
2246}
2247
2248/**
2249 * srpt_close_ch() - Close an RDMA channel.
2250 */
2251static void srpt_close_ch(struct srpt_rdma_ch *ch)
2252{
2253 struct srpt_device *sdev;
2254
2255 sdev = ch->sport->sdev;
2256 spin_lock_irq(&sdev->spinlock);
2257 __srpt_close_ch(ch);
2258 spin_unlock_irq(&sdev->spinlock);
2259}
2260
2261/**
2262 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
2263 * @cm_id: Pointer to the CM ID of the channel to be drained.
2264 *
2265 * Note: Must be called from inside srpt_cm_handler to avoid a race between
2266 * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
2267 * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
2268 * waits until all target sessions for the associated IB device have been
2269 * unregistered and target session registration involves a call to
2270 * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
2271 * this function has finished).
2272 */
2273static void srpt_drain_channel(struct ib_cm_id *cm_id)
2274{
2275 struct srpt_device *sdev;
2276 struct srpt_rdma_ch *ch;
2277 int ret;
2278 bool do_reset = false;
2279
2280 WARN_ON_ONCE(irqs_disabled());
2281
2282 sdev = cm_id->context;
2283 BUG_ON(!sdev);
2284 spin_lock_irq(&sdev->spinlock);
2285 list_for_each_entry(ch, &sdev->rch_list, list) {
2286 if (ch->cm_id == cm_id) {
2287 do_reset = srpt_test_and_set_ch_state(ch,
2288 CH_CONNECTING, CH_DRAINING) ||
2289 srpt_test_and_set_ch_state(ch,
2290 CH_LIVE, CH_DRAINING) ||
2291 srpt_test_and_set_ch_state(ch,
2292 CH_DISCONNECTING, CH_DRAINING);
2293 break;
2294 }
2295 }
2296 spin_unlock_irq(&sdev->spinlock);
2297
2298 if (do_reset) {
2299 ret = srpt_ch_qp_err(ch);
2300 if (ret < 0)
2301 printk(KERN_ERR "Setting queue pair in error state"
2302 " failed: %d\n", ret);
2303 }
2304}
2305
/**
 * srpt_find_channel() - Look up an RDMA channel.
 * @sdev: Device on which to look up the channel.
 * @cm_id: Pointer to the CM ID of the channel to be looked up.
 *
 * Return NULL if no matching RDMA channel has been found.
 */
2312static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
2313 struct ib_cm_id *cm_id)
2314{
2315 struct srpt_rdma_ch *ch;
2316 bool found;
2317
2318 WARN_ON_ONCE(irqs_disabled());
2319 BUG_ON(!sdev);
2320
2321 found = false;
2322 spin_lock_irq(&sdev->spinlock);
2323 list_for_each_entry(ch, &sdev->rch_list, list) {
2324 if (ch->cm_id == cm_id) {
2325 found = true;
2326 break;
2327 }
2328 }
2329 spin_unlock_irq(&sdev->spinlock);
2330
2331 return found ? ch : NULL;
2332}
2333
2334/**
2335 * srpt_release_channel() - Release channel resources.
2336 *
2337 * Schedules the actual release because:
2338 * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
2339 * trigger a deadlock.
2340 * - It is not safe to call TCM transport_* functions from interrupt context.
2341 */
2342static void srpt_release_channel(struct srpt_rdma_ch *ch)
2343{
2344 schedule_work(&ch->release_work);
2345}
2346
2347static void srpt_release_channel_work(struct work_struct *w)
2348{
2349 struct srpt_rdma_ch *ch;
2350 struct srpt_device *sdev;
2351
2352 ch = container_of(w, struct srpt_rdma_ch, release_work);
2353 pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
2354 ch->release_done);
2355
2356 sdev = ch->sport->sdev;
2357 BUG_ON(!sdev);
2358
2359 transport_deregister_session_configfs(ch->sess);
2360 transport_deregister_session(ch->sess);
2361 ch->sess = NULL;
2362
2363 srpt_destroy_ch_ib(ch);
2364
2365 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2366 ch->sport->sdev, ch->rq_size,
2367 ch->rsp_size, DMA_TO_DEVICE);
2368
2369 spin_lock_irq(&sdev->spinlock);
2370 list_del(&ch->list);
2371 spin_unlock_irq(&sdev->spinlock);
2372
2373 ib_destroy_cm_id(ch->cm_id);
2374
2375 if (ch->release_done)
2376 complete(ch->release_done);
2377
2378 wake_up(&sdev->ch_releaseQ);
2379
2380 kfree(ch);
2381}
2382
2383static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
2384 u8 i_port_id[16])
2385{
2386 struct srpt_node_acl *nacl;
2387
2388 list_for_each_entry(nacl, &sport->port_acl_list, list)
2389 if (memcmp(nacl->i_port_id, i_port_id,
2390 sizeof(nacl->i_port_id)) == 0)
2391 return nacl;
2392
2393 return NULL;
2394}
2395
2396static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
2397 u8 i_port_id[16])
2398{
2399 struct srpt_node_acl *nacl;
2400
2401 spin_lock_irq(&sport->port_acl_lock);
2402 nacl = __srpt_lookup_acl(sport, i_port_id);
2403 spin_unlock_irq(&sport->port_acl_lock);
2404
2405 return nacl;
2406}
2407
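/*
 * Sketch of the SRP login handshake as handled below: the initiator sends a
 * CM REQ carrying an SRP_LOGIN_REQ; srpt_cm_req_recv() answers with a CM REP
 * carrying an SRP_LOGIN_RSP or a CM REJ carrying an SRP_LOGIN_REJ; the CM
 * RTU from the initiator then makes srpt_cm_rtu_recv() transition the
 * channel to CH_LIVE and process any queued information units.
 */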
2408/**
2409 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
2410 *
2411 * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
2413 */
2414static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2415 struct ib_cm_req_event_param *param,
2416 void *private_data)
2417{
2418 struct srpt_device *sdev = cm_id->context;
2419 struct srpt_port *sport = &sdev->port[param->port - 1];
2420 struct srp_login_req *req;
2421 struct srp_login_rsp *rsp;
2422 struct srp_login_rej *rej;
2423 struct ib_cm_rep_param *rep_param;
2424 struct srpt_rdma_ch *ch, *tmp_ch;
2425 struct srpt_node_acl *nacl;
2426 u32 it_iu_len;
2427 int i;
2428 int ret = 0;
2429
2430 WARN_ON_ONCE(irqs_disabled());
2431
2432 if (WARN_ON(!sdev || !private_data))
2433 return -EINVAL;
2434
2435 req = (struct srp_login_req *)private_data;
2436
2437 it_iu_len = be32_to_cpu(req->req_it_iu_len);
2438
2439 printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
2440 " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
2441 " (guid=0x%llx:0x%llx)\n",
2442 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
2443 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
2444 be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
2445 be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
2446 it_iu_len,
2447 param->port,
2448 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
2449 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
2450
2451 rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
2452 rej = kzalloc(sizeof *rej, GFP_KERNEL);
2453 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
2454
2455 if (!rsp || !rej || !rep_param) {
2456 ret = -ENOMEM;
2457 goto out;
2458 }
2459
2460 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2461 rej->reason = __constant_cpu_to_be32(
2462 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2463 ret = -EINVAL;
2464 printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
2465 " length (%d bytes) is out of range (%d .. %d)\n",
2466 it_iu_len, 64, srp_max_req_size);
2467 goto reject;
2468 }
2469
2470 if (!sport->enabled) {
2471 rej->reason = __constant_cpu_to_be32(
2472 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2473 ret = -EINVAL;
2474 printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
2475 " has not yet been enabled\n");
2476 goto reject;
2477 }
2478
2479 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2480 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2481
2482 spin_lock_irq(&sdev->spinlock);
2483
2484 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2485 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2486 && !memcmp(ch->t_port_id, req->target_port_id, 16)
2487 && param->port == ch->sport->port
2488 && param->listen_id == ch->sport->sdev->cm_id
2489 && ch->cm_id) {
2490 enum rdma_ch_state ch_state;
2491
2492 ch_state = srpt_get_ch_state(ch);
2493 if (ch_state != CH_CONNECTING
2494 && ch_state != CH_LIVE)
2495 continue;
2496
2497 /* found an existing channel */
2498 pr_debug("Found existing channel %s"
2499 " cm_id= %p state= %d\n",
2500 ch->sess_name, ch->cm_id, ch_state);
2501
2502 __srpt_close_ch(ch);
2503
2504 rsp->rsp_flags =
2505 SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2506 }
2507 }
2508
2509 spin_unlock_irq(&sdev->spinlock);
2510
2511 } else
2512 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2513
2514 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2515 || *(__be64 *)(req->target_port_id + 8) !=
2516 cpu_to_be64(srpt_service_guid)) {
2517 rej->reason = __constant_cpu_to_be32(
2518 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2519 ret = -ENOMEM;
2520 printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
2521 " has an invalid target port identifier.\n");
2522 goto reject;
2523 }
2524
2525 ch = kzalloc(sizeof *ch, GFP_KERNEL);
2526 if (!ch) {
2527 rej->reason = __constant_cpu_to_be32(
2528 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2529 printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
2530 ret = -ENOMEM;
2531 goto reject;
2532 }
2533
2534 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2535 memcpy(ch->i_port_id, req->initiator_port_id, 16);
2536 memcpy(ch->t_port_id, req->target_port_id, 16);
2537 ch->sport = &sdev->port[param->port - 1];
2538 ch->cm_id = cm_id;
2539 /*
2540 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
2541 * for the SRP protocol to the command queue size.
2542 */
2543 ch->rq_size = SRPT_RQ_SIZE;
2544 spin_lock_init(&ch->spinlock);
2545 ch->state = CH_CONNECTING;
2546 INIT_LIST_HEAD(&ch->cmd_wait_list);
2547 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2548
2549 ch->ioctx_ring = (struct srpt_send_ioctx **)
2550 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2551 sizeof(*ch->ioctx_ring[0]),
2552 ch->rsp_size, DMA_TO_DEVICE);
	if (!ch->ioctx_ring) {
		rej->reason = __constant_cpu_to_be32(
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		ret = -ENOMEM;
		goto free_ch;
	}
2555
2556 INIT_LIST_HEAD(&ch->free_list);
2557 for (i = 0; i < ch->rq_size; i++) {
2558 ch->ioctx_ring[i]->ch = ch;
2559 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2560 }
2561
2562 ret = srpt_create_ch_ib(ch);
2563 if (ret) {
2564 rej->reason = __constant_cpu_to_be32(
2565 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2566 printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
2567 " a new RDMA channel failed.\n");
2568 goto free_ring;
2569 }
2570
2571 ret = srpt_ch_qp_rtr(ch, ch->qp);
2572 if (ret) {
2573 rej->reason = __constant_cpu_to_be32(
2574 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2575 printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
2576 " RTR failed (error code = %d)\n", ret);
2577 goto destroy_ib;
2578 }
2579 /*
	 * Use the initiator port identifier as the session name.
2581 */
2582 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2583 be64_to_cpu(*(__be64 *)ch->i_port_id),
2584 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2585
2586 pr_debug("registering session %s\n", ch->sess_name);
2587
2588 nacl = srpt_lookup_acl(sport, ch->i_port_id);
	if (!nacl) {
		printk(KERN_INFO "Rejected login because no ACL has been"
		       " configured yet for initiator %s.\n", ch->sess_name);
		rej->reason = __constant_cpu_to_be32(
				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
		ret = -EINVAL;
		goto destroy_ib;
	}
2596
	ch->sess = transport_init_session();
	if (!ch->sess) {
		rej->reason = __constant_cpu_to_be32(
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_debug("Failed to create session\n");
		ret = -ENOMEM;
		/* ch->sess is NULL here, so skip session deregistration. */
		goto destroy_ib;
	}
2604 ch->sess->se_node_acl = &nacl->nacl;
2605 transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
2606
2607 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2608 ch->sess_name, ch->cm_id);
2609
2610 /* create srp_login_response */
2611 rsp->opcode = SRP_LOGIN_RSP;
2612 rsp->tag = req->tag;
2613 rsp->max_it_iu_len = req->req_it_iu_len;
2614 rsp->max_ti_iu_len = req->req_it_iu_len;
2615 ch->max_ti_iu_len = it_iu_len;
2616 rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2617 | SRP_BUF_FORMAT_INDIRECT);
2618 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2619 atomic_set(&ch->req_lim, ch->rq_size);
2620 atomic_set(&ch->req_lim_delta, 0);
2621
2622 /* create cm reply */
2623 rep_param->qp_num = ch->qp->qp_num;
2624 rep_param->private_data = (void *)rsp;
2625 rep_param->private_data_len = sizeof *rsp;
2626 rep_param->rnr_retry_count = 7;
2627 rep_param->flow_control = 1;
2628 rep_param->failover_accepted = 0;
2629 rep_param->srq = 1;
2630 rep_param->responder_resources = 4;
2631 rep_param->initiator_depth = 4;
2632
2633 ret = ib_send_cm_rep(cm_id, rep_param);
2634 if (ret) {
2635 printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
2636 " (error code = %d)\n", ret);
2637 goto release_channel;
2638 }
2639
2640 spin_lock_irq(&sdev->spinlock);
2641 list_add_tail(&ch->list, &sdev->rch_list);
2642 spin_unlock_irq(&sdev->spinlock);
2643
2644 goto out;
2645
release_channel:
	srpt_set_ch_state(ch, CH_RELEASING);
	transport_deregister_session_configfs(ch->sess);
	transport_deregister_session(ch->sess);
	ch->sess = NULL;
2653
2654destroy_ib:
2655 srpt_destroy_ch_ib(ch);
2656
2657free_ring:
2658 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2659 ch->sport->sdev, ch->rq_size,
2660 ch->rsp_size, DMA_TO_DEVICE);
2661free_ch:
2662 kfree(ch);
2663
2664reject:
2665 rej->opcode = SRP_LOGIN_REJ;
2666 rej->tag = req->tag;
2667 rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2668 | SRP_BUF_FORMAT_INDIRECT);
2669
2670 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2671 (void *)rej, sizeof *rej);
2672
2673out:
2674 kfree(rep_param);
2675 kfree(rsp);
2676 kfree(rej);
2677
2678 return ret;
2679}
2680
2681static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
2682{
2683 printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
2684 srpt_drain_channel(cm_id);
2685}
2686
2687/**
2688 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
2689 *
2690 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2691 * and that the recipient may begin transmitting (RTU = ready to use).
2692 */
2693static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
2694{
2695 struct srpt_rdma_ch *ch;
2696 int ret;
2697
2698 ch = srpt_find_channel(cm_id->context, cm_id);
2699 BUG_ON(!ch);
2700
2701 if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
2702 struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
2703
2704 ret = srpt_ch_qp_rts(ch, ch->qp);
2705
2706 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
2707 wait_list) {
2708 list_del(&ioctx->wait_list);
2709 srpt_handle_new_iu(ch, ioctx, NULL);
2710 }
2711 if (ret)
2712 srpt_close_ch(ch);
2713 }
2714}
2715
2716static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
2717{
2718 printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
2719 srpt_drain_channel(cm_id);
2720}
2721
2722static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
2723{
2724 printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
2725 srpt_drain_channel(cm_id);
2726}
2727
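/*
 * Disconnect sequence as handled below: upon receipt of a CM DREQ,
 * srpt_cm_dreq_recv() answers with a CM DREP and moves the channel to the
 * CH_DISCONNECTING state; the subsequent DREP or TimeWait exit event then
 * triggers srpt_drain_channel(), which puts the queue pair in the error
 * state.
 */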
2728/**
2729 * srpt_cm_dreq_recv() - Process reception of a DREQ message.
2730 */
2731static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
2732{
2733 struct srpt_rdma_ch *ch;
2734 unsigned long flags;
2735 bool send_drep = false;
2736
2737 ch = srpt_find_channel(cm_id->context, cm_id);
2738 BUG_ON(!ch);
2739
2740 pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
2741
2742 spin_lock_irqsave(&ch->spinlock, flags);
2743 switch (ch->state) {
2744 case CH_CONNECTING:
2745 case CH_LIVE:
2746 send_drep = true;
2747 ch->state = CH_DISCONNECTING;
2748 break;
2749 case CH_DISCONNECTING:
2750 case CH_DRAINING:
2751 case CH_RELEASING:
2752 WARN(true, "unexpected channel state %d\n", ch->state);
2753 break;
2754 }
2755 spin_unlock_irqrestore(&ch->spinlock, flags);
2756
2757 if (send_drep) {
2758 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
2759 printk(KERN_ERR "Sending IB DREP failed.\n");
2760 printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
2761 ch->sess_name);
2762 }
2763}
2764
2765/**
2766 * srpt_cm_drep_recv() - Process reception of a DREP message.
2767 */
2768static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
2769{
2770 printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
2771 cm_id);
2772 srpt_drain_channel(cm_id);
2773}
2774
2775/**
2776 * srpt_cm_handler() - IB connection manager callback function.
2777 *
 * A non-zero return value will cause the caller to destroy the CM ID.
2779 *
2780 * Note: srpt_cm_handler() must only return a non-zero value when transferring
2781 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
2782 * a non-zero value in any other case will trigger a race with the
2783 * ib_destroy_cm_id() call in srpt_release_channel().
2784 */
2785static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2786{
2787 int ret;
2788
2789 ret = 0;
2790 switch (event->event) {
2791 case IB_CM_REQ_RECEIVED:
2792 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2793 event->private_data);
2794 break;
2795 case IB_CM_REJ_RECEIVED:
2796 srpt_cm_rej_recv(cm_id);
2797 break;
2798 case IB_CM_RTU_RECEIVED:
2799 case IB_CM_USER_ESTABLISHED:
2800 srpt_cm_rtu_recv(cm_id);
2801 break;
2802 case IB_CM_DREQ_RECEIVED:
2803 srpt_cm_dreq_recv(cm_id);
2804 break;
2805 case IB_CM_DREP_RECEIVED:
2806 srpt_cm_drep_recv(cm_id);
2807 break;
2808 case IB_CM_TIMEWAIT_EXIT:
2809 srpt_cm_timewait_exit(cm_id);
2810 break;
2811 case IB_CM_REP_ERROR:
2812 srpt_cm_rep_error(cm_id);
2813 break;
2814 case IB_CM_DREQ_ERROR:
2815 printk(KERN_INFO "Received IB DREQ ERROR event.\n");
2816 break;
2817 case IB_CM_MRA_RECEIVED:
2818 printk(KERN_INFO "Received IB MRA event\n");
2819 break;
2820 default:
2821 printk(KERN_ERR "received unrecognized IB CM event %d\n",
2822 event->event);
2823 break;
2824 }
2825
2826 return ret;
2827}
2828
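/*
 * Note: encode_wr_id(), used below, is assumed to pack an srpt_opcode value
 * and an I/O context index into a single 64-bit work request ID;
 * opcode_from_wr_id() and idx_from_wr_id() in the completion handlers above
 * perform the inverse operation.
 */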
2829/**
2830 * srpt_perform_rdmas() - Perform IB RDMA.
2831 *
2832 * Returns zero upon success or a negative number upon failure.
2833 */
2834static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2835 struct srpt_send_ioctx *ioctx)
2836{
2837 struct ib_send_wr wr;
2838 struct ib_send_wr *bad_wr;
2839 struct rdma_iu *riu;
2840 int i;
2841 int ret;
2842 int sq_wr_avail;
2843 enum dma_data_direction dir;
2844 const int n_rdma = ioctx->n_rdma;
2845
2846 dir = ioctx->cmd.data_direction;
2847 if (dir == DMA_TO_DEVICE) {
2848 /* write */
2849 ret = -ENOMEM;
2850 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2851 if (sq_wr_avail < 0) {
2852 printk(KERN_WARNING "IB send queue full (needed %d)\n",
2853 n_rdma);
2854 goto out;
2855 }
2856 }
2857
2858 ioctx->rdma_aborted = false;
2859 ret = 0;
2860 riu = ioctx->rdma_ius;
2861 memset(&wr, 0, sizeof wr);
2862
2863 for (i = 0; i < n_rdma; ++i, ++riu) {
2864 if (dir == DMA_FROM_DEVICE) {
2865 wr.opcode = IB_WR_RDMA_WRITE;
2866 wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2867 SRPT_RDMA_WRITE_LAST :
2868 SRPT_RDMA_MID,
2869 ioctx->ioctx.index);
2870 } else {
2871 wr.opcode = IB_WR_RDMA_READ;
2872 wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2873 SRPT_RDMA_READ_LAST :
2874 SRPT_RDMA_MID,
2875 ioctx->ioctx.index);
2876 }
2877 wr.next = NULL;
2878 wr.wr.rdma.remote_addr = riu->raddr;
2879 wr.wr.rdma.rkey = riu->rkey;
2880 wr.num_sge = riu->sge_cnt;
2881 wr.sg_list = riu->sge;
2882
2883 /* only get completion event for the last rdma write */
2884 if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
2885 wr.send_flags = IB_SEND_SIGNALED;
2886
2887 ret = ib_post_send(ch->qp, &wr, &bad_wr);
2888 if (ret)
2889 break;
2890 }
2891
2892 if (ret)
		printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d\n",
		       __func__, __LINE__, ret, i, n_rdma);
2895 if (ret && i > 0) {
2896 wr.num_sge = 0;
2897 wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
2898 wr.send_flags = IB_SEND_SIGNALED;
2899 while (ch->state == CH_LIVE &&
2900 ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
			printk(KERN_INFO "Trying to abort failed RDMA transfer"
			       " [%d]\n", ioctx->ioctx.index);
2903 msleep(1000);
2904 }
2905 while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
			printk(KERN_INFO "Waiting until RDMA abort finished"
			       " [%d]\n", ioctx->ioctx.index);
2908 msleep(1000);
2909 }
2910 }
2911out:
2912 if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
2913 atomic_add(n_rdma, &ch->sq_wr_avail);
2914 return ret;
2915}
2916
2917/**
2918 * srpt_xfer_data() - Start data transfer from initiator to target.
2919 */
2920static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2921 struct srpt_send_ioctx *ioctx)
2922{
2923 int ret;
2924
2925 ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2926 if (ret) {
2927 printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2928 goto out;
2929 }
2930
2931 ret = srpt_perform_rdmas(ch, ioctx);
2932 if (ret) {
2933 if (ret == -EAGAIN || ret == -ENOMEM)
2934 printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
2935 __func__, __LINE__, ret);
2936 else
2937 printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
2938 __func__, __LINE__, ret);
2939 goto out_unmap;
2940 }
2941
2942out:
2943 return ret;
2944out_unmap:
2945 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2946 goto out;
2947}
2948
2949static int srpt_write_pending_status(struct se_cmd *se_cmd)
2950{
2951 struct srpt_send_ioctx *ioctx;
2952
2953 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2954 return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2955}
2956
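/*
 * Data flow for a write (DMA_TO_DEVICE) command: srpt_write_pending() calls
 * srpt_xfer_data(), which maps the SG list and makes srpt_perform_rdmas()
 * post RDMA READ work requests; the SRPT_RDMA_READ_LAST completion then
 * makes srpt_handle_rdma_comp() call transport_generic_handle_data() so that
 * the target core resumes processing of the command.
 */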
2957/*
2958 * srpt_write_pending() - Start data transfer from initiator to target (write).
2959 */
2960static int srpt_write_pending(struct se_cmd *se_cmd)
2961{
2962 struct srpt_rdma_ch *ch;
2963 struct srpt_send_ioctx *ioctx;
2964 enum srpt_command_state new_state;
2965 enum rdma_ch_state ch_state;
2966 int ret;
2967
2968 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2969
2970 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2971 WARN_ON(new_state == SRPT_STATE_DONE);
2972
2973 ch = ioctx->ch;
2974 BUG_ON(!ch);
2975
2976 ch_state = srpt_get_ch_state(ch);
2977 switch (ch_state) {
2978 case CH_CONNECTING:
2979 WARN(true, "unexpected channel state %d\n", ch_state);
2980 ret = -EINVAL;
2981 goto out;
2982 case CH_LIVE:
2983 break;
2984 case CH_DISCONNECTING:
2985 case CH_DRAINING:
2986 case CH_RELEASING:
2987 pr_debug("cmd with tag %lld: channel disconnecting\n",
2988 ioctx->tag);
2989 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2990 ret = -EINVAL;
2991 goto out;
2992 }
2993 ret = srpt_xfer_data(ch, ioctx);
2994
2995out:
2996 return ret;
2997}
2998
2999static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
3000{
3001 switch (tcm_mgmt_status) {
3002 case TMR_FUNCTION_COMPLETE:
3003 return SRP_TSK_MGMT_SUCCESS;
3004 case TMR_FUNCTION_REJECTED:
3005 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
3006 }
3007 return SRP_TSK_MGMT_FAILED;
3008}
3009
3010/**
3011 * srpt_queue_response() - Transmits the response to a SCSI command.
3012 *
3013 * Callback function called by the TCM core. Must not block since it can be
3014 * invoked on the context of the IB completion handler.
3015 */
3016static int srpt_queue_response(struct se_cmd *cmd)
3017{
3018 struct srpt_rdma_ch *ch;
3019 struct srpt_send_ioctx *ioctx;
3020 enum srpt_command_state state;
3021 unsigned long flags;
3022 int ret;
3023 enum dma_data_direction dir;
3024 int resp_len;
3025 u8 srp_tm_status;
3026
3027 ret = 0;
3028
3029 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3030 ch = ioctx->ch;
3031 BUG_ON(!ch);
3032
3033 spin_lock_irqsave(&ioctx->spinlock, flags);
3034 state = ioctx->state;
3035 switch (state) {
3036 case SRPT_STATE_NEW:
3037 case SRPT_STATE_DATA_IN:
3038 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
3039 break;
3040 case SRPT_STATE_MGMT:
3041 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
3042 break;
3043 default:
3044 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
3045 ch, ioctx->ioctx.index, ioctx->state);
3046 break;
3047 }
3048 spin_unlock_irqrestore(&ioctx->spinlock, flags);
3049
3050 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
3051 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
3052 atomic_inc(&ch->req_lim_delta);
3053 srpt_abort_cmd(ioctx);
3054 goto out;
3055 }
3056
3057 dir = ioctx->cmd.data_direction;
3058
3059 /* For read commands, transfer the data to the initiator. */
3060 if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
3061 !ioctx->queue_status_only) {
3062 ret = srpt_xfer_data(ch, ioctx);
3063 if (ret) {
3064 printk(KERN_ERR "xfer_data failed for tag %llu\n",
3065 ioctx->tag);
3066 goto out;
3067 }
3068 }
3069
3070	if (state != SRPT_STATE_MGMT) {
3071		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
3072					      cmd->scsi_status);
3073	} else {
3074		srp_tm_status =
3075			tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
3076		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
3077						  ioctx->tag);
3078	}
3079 ret = srpt_post_send(ch, ioctx, resp_len);
3080 if (ret) {
3081 printk(KERN_ERR "sending cmd response failed for tag %llu\n",
3082 ioctx->tag);
3083 srpt_unmap_sg_to_ib_sge(ch, ioctx);
3084 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
3085 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
3086 }
3087
3088out:
3089 return ret;
3090}
3091
3092static int srpt_queue_status(struct se_cmd *cmd)
3093{
3094 struct srpt_send_ioctx *ioctx;
3095
3096 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3097 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
3098 if (cmd->se_cmd_flags &
3099 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
3100 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
3101 ioctx->queue_status_only = true;
3102 return srpt_queue_response(cmd);
3103}
3104
3105static void srpt_refresh_port_work(struct work_struct *work)
3106{
3107 struct srpt_port *sport = container_of(work, struct srpt_port, work);
3108
3109 srpt_refresh_port(sport);
3110}
3111
3112static int srpt_ch_list_empty(struct srpt_device *sdev)
3113{
3114 int res;
3115
3116 spin_lock_irq(&sdev->spinlock);
3117 res = list_empty(&sdev->rch_list);
3118 spin_unlock_irq(&sdev->spinlock);
3119
3120 return res;
3121}
3122
3123/**
3124 * srpt_release_sdev() - Free the channel resources associated with a target.
3125 */
3126static int srpt_release_sdev(struct srpt_device *sdev)
3127{
3128 struct srpt_rdma_ch *ch, *tmp_ch;
3129 int res;
3130
3131 WARN_ON_ONCE(irqs_disabled());
3132
3133 BUG_ON(!sdev);
3134
3135 spin_lock_irq(&sdev->spinlock);
3136 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
3137 __srpt_close_ch(ch);
3138 spin_unlock_irq(&sdev->spinlock);
3139
3140 res = wait_event_interruptible(sdev->ch_releaseQ,
3141 srpt_ch_list_empty(sdev));
3142 if (res)
3143 printk(KERN_ERR "%s: interrupted.\n", __func__);
3144
3145 return 0;
3146}
3147
3148static struct srpt_port *__srpt_lookup_port(const char *name)
3149{
3150 struct ib_device *dev;
3151 struct srpt_device *sdev;
3152 struct srpt_port *sport;
3153 int i;
3154
3155 list_for_each_entry(sdev, &srpt_dev_list, list) {
3156 dev = sdev->device;
3157 if (!dev)
3158 continue;
3159
3160 for (i = 0; i < dev->phys_port_cnt; i++) {
3161 sport = &sdev->port[i];
3162
3163 if (!strcmp(sport->port_guid, name))
3164 return sport;
3165 }
3166 }
3167
3168 return NULL;
3169}
3170
3171static struct srpt_port *srpt_lookup_port(const char *name)
3172{
3173 struct srpt_port *sport;
3174
3175 spin_lock(&srpt_dev_lock);
3176 sport = __srpt_lookup_port(name);
3177 spin_unlock(&srpt_dev_lock);
3178
3179 return sport;
3180}
3181
3182/**
3183 * srpt_add_one() - InfiniBand device addition callback function.
3184 */
3185static void srpt_add_one(struct ib_device *device)
3186{
3187 struct srpt_device *sdev;
3188 struct srpt_port *sport;
3189 struct ib_srq_init_attr srq_attr;
3190 int i;
3191
3192 pr_debug("device = %p, device->dma_ops = %p\n", device,
3193 device->dma_ops);
3194
3195 sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
3196 if (!sdev)
3197 goto err;
3198
3199 sdev->device = device;
3200 INIT_LIST_HEAD(&sdev->rch_list);
3201 init_waitqueue_head(&sdev->ch_releaseQ);
3202 spin_lock_init(&sdev->spinlock);
3203
3204 if (ib_query_device(device, &sdev->dev_attr))
3205 goto free_dev;
3206
3207 sdev->pd = ib_alloc_pd(device);
3208 if (IS_ERR(sdev->pd))
3209 goto free_dev;
3210
3211 sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
3212 if (IS_ERR(sdev->mr))
3213 goto err_pd;
3214
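	/* Clamp the configured SRQ size to what the HCA supports (max_srq_wr). */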
3215 sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
3216
3217 srq_attr.event_handler = srpt_srq_event;
3218 srq_attr.srq_context = (void *)sdev;
3219 srq_attr.attr.max_wr = sdev->srq_size;
3220 srq_attr.attr.max_sge = 1;
3221 srq_attr.attr.srq_limit = 0;
3222
3223 sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
3224 if (IS_ERR(sdev->srq))
3225 goto err_mr;
3226
3227 pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
3228 __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
3229 device->name);
3230
3231 if (!srpt_service_guid)
3232 srpt_service_guid = be64_to_cpu(device->node_guid);
3233
3234 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3235 if (IS_ERR(sdev->cm_id))
3236 goto err_srq;
3237
3238 /* print out target login information */
3239 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
3240 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
3241 srpt_service_guid, srpt_service_guid);
3242
3243 /*
3244	 * We do not have a consistent service_id (i.e. the id_ext of the
3245	 * target_id) to identify this target. We currently use the GUID of the
3246	 * first HCA in the system as the service_id; therefore the target_id
3247	 * will change if this HCA goes bad and is replaced by a different HCA.
3248 */
3249 if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
3250 goto err_cm;
3251
3252 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3253 srpt_event_handler);
3254 if (ib_register_event_handler(&sdev->event_handler))
3255 goto err_cm;
3256
3257 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3258 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3259 sizeof(*sdev->ioctx_ring[0]),
3260 srp_max_req_size, DMA_FROM_DEVICE);
3261 if (!sdev->ioctx_ring)
3262 goto err_event;
3263
3264 for (i = 0; i < sdev->srq_size; ++i)
3265 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
3266
3267	WARN_ON(sdev->device->phys_port_cnt >
3268		ARRAY_SIZE(sdev->port));
3269
3270 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3271 sport = &sdev->port[i - 1];
3272 sport->sdev = sdev;
3273 sport->port = i;
3274 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3275 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3276 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3277 INIT_WORK(&sport->work, srpt_refresh_port_work);
3278 INIT_LIST_HEAD(&sport->port_acl_list);
3279 spin_lock_init(&sport->port_acl_lock);
3280
3281 if (srpt_refresh_port(sport)) {
3282 printk(KERN_ERR "MAD registration failed for %s-%d.\n",
3283 srpt_sdev_name(sdev), i);
3284 goto err_ring;
3285 }
3286 snprintf(sport->port_guid, sizeof(sport->port_guid),
3287 "0x%016llx%016llx",
3288 be64_to_cpu(sport->gid.global.subnet_prefix),
3289 be64_to_cpu(sport->gid.global.interface_id));
3290 }
3291
3292 spin_lock(&srpt_dev_lock);
3293 list_add_tail(&sdev->list, &srpt_dev_list);
3294 spin_unlock(&srpt_dev_lock);
3295
3296out:
3297 ib_set_client_data(device, &srpt_client, sdev);
3298 pr_debug("added %s.\n", device->name);
3299 return;
3300
3301err_ring:
3302 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3303 sdev->srq_size, srp_max_req_size,
3304 DMA_FROM_DEVICE);
3305err_event:
3306 ib_unregister_event_handler(&sdev->event_handler);
3307err_cm:
3308 ib_destroy_cm_id(sdev->cm_id);
3309err_srq:
3310 ib_destroy_srq(sdev->srq);
3311err_mr:
3312 ib_dereg_mr(sdev->mr);
3313err_pd:
3314 ib_dealloc_pd(sdev->pd);
3315free_dev:
3316 kfree(sdev);
3317err:
3318 sdev = NULL;
3319 printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
3320 goto out;
3321}
3322
3323/**
3324 * srpt_remove_one() - InfiniBand device removal callback function.
3325 */
3326static void srpt_remove_one(struct ib_device *device)
3327{
3328 struct srpt_device *sdev;
3329 int i;
3330
3331 sdev = ib_get_client_data(device, &srpt_client);
3332 if (!sdev) {
3333 printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
3334 device->name);
3335 return;
3336 }
3337
3338 srpt_unregister_mad_agent(sdev);
3339
3340 ib_unregister_event_handler(&sdev->event_handler);
3341
3342 /* Cancel any work queued by the just unregistered IB event handler. */
3343 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3344 cancel_work_sync(&sdev->port[i].work);
3345
3346 ib_destroy_cm_id(sdev->cm_id);
3347
3348 /*
3349 * Unregistering a target must happen after destroying sdev->cm_id
3350 * such that no new SRP_LOGIN_REQ information units can arrive while
3351 * destroying the target.
3352 */
3353 spin_lock(&srpt_dev_lock);
3354 list_del(&sdev->list);
3355 spin_unlock(&srpt_dev_lock);
3356 srpt_release_sdev(sdev);
3357
3358 ib_destroy_srq(sdev->srq);
3359 ib_dereg_mr(sdev->mr);
3360 ib_dealloc_pd(sdev->pd);
3361
3362 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3363 sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
3364 sdev->ioctx_ring = NULL;
3365 kfree(sdev);
3366}
3367
3368static struct ib_client srpt_client = {
3369 .name = DRV_NAME,
3370 .add = srpt_add_one,
3371 .remove = srpt_remove_one
3372};
3373
3374static int srpt_check_true(struct se_portal_group *se_tpg)
3375{
3376 return 1;
3377}
3378
3379static int srpt_check_false(struct se_portal_group *se_tpg)
3380{
3381 return 0;
3382}
3383
3384static char *srpt_get_fabric_name(void)
3385{
3386 return "srpt";
3387}
3388
3389static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
3390{
3391 return SCSI_TRANSPORTID_PROTOCOLID_SRP;
3392}
3393
3394static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3395{
3396 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3397
3398 return sport->port_guid;
3399}
3400
3401static u16 srpt_get_tag(struct se_portal_group *tpg)
3402{
3403 return 1;
3404}
3405
3406static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
3407{
3408 return 1;
3409}
3410
3411static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
3412 struct se_node_acl *se_nacl,
3413 struct t10_pr_registration *pr_reg,
3414 int *format_code, unsigned char *buf)
3415{
3416 struct srpt_node_acl *nacl;
3417 struct spc_rdma_transport_id *tr_id;
3418
3419 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3420 tr_id = (void *)buf;
3421 tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
3422 memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
3423 return sizeof(*tr_id);
3424}
3425
3426static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
3427 struct se_node_acl *se_nacl,
3428 struct t10_pr_registration *pr_reg,
3429 int *format_code)
3430{
3431 *format_code = 0;
3432 return sizeof(struct spc_rdma_transport_id);
3433}
3434
3435static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
3436 const char *buf, u32 *out_tid_len,
3437 char **port_nexus_ptr)
3438{
3439 struct spc_rdma_transport_id *tr_id;
3440
3441 *port_nexus_ptr = NULL;
3442 *out_tid_len = sizeof(struct spc_rdma_transport_id);
3443 tr_id = (void *)buf;
3444 return (char *)tr_id->i_port_id;
3445}
3446
3447static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
3448{
3449 struct srpt_node_acl *nacl;
3450
3451 nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
3452 if (!nacl) {
3453 printk(KERN_ERR "Unable to alocate struct srpt_node_acl\n");
3454 return NULL;
3455 }
3456
3457 return &nacl->nacl;
3458}
3459
3460static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
3461 struct se_node_acl *se_nacl)
3462{
3463 struct srpt_node_acl *nacl;
3464
3465 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3466 kfree(nacl);
3467}
3468
3469static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3470{
3471 return 1;
3472}
3473
3474static void srpt_release_cmd(struct se_cmd *se_cmd)
3475{
3476}
3477
3478/**
3479 * srpt_shutdown_session() - Whether or not a session may be shut down.
3480 */
3481static int srpt_shutdown_session(struct se_session *se_sess)
3482{
3483 return true;
3484}
3485
3486/**
3487 * srpt_close_session() - Forcibly close a session.
3488 *
3489 * Callback function invoked by the TCM core to clean up sessions associated
3490 * with a node ACL when the user invokes
3491 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3492 */
3493static void srpt_close_session(struct se_session *se_sess)
3494{
3495 DECLARE_COMPLETION_ONSTACK(release_done);
3496 struct srpt_rdma_ch *ch;
3497 struct srpt_device *sdev;
3498 int res;
3499
3500 ch = se_sess->fabric_sess_ptr;
3501 WARN_ON(ch->sess != se_sess);
3502
3503 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
3504
3505 sdev = ch->sport->sdev;
3506 spin_lock_irq(&sdev->spinlock);
3507 BUG_ON(ch->release_done);
3508 ch->release_done = &release_done;
3509 __srpt_close_ch(ch);
3510 spin_unlock_irq(&sdev->spinlock);
3511
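	/* Wait until srpt_release_channel() has signalled release_done. */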
3512 res = wait_for_completion_timeout(&release_done, 60 * HZ);
3513 WARN_ON(res <= 0);
3514}
3515
3516/**
3517 * To do: Find out whether stop_session() has a meaning for transports
3518 * other than iSCSI.
3519 */
3520static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
3521 int conn_sleep)
3522{
3523}
3524
3525static void srpt_reset_nexus(struct se_session *sess)
3526{
3527 printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
3528}
3529
3530static int srpt_sess_logged_in(struct se_session *se_sess)
3531{
3532 return true;
3533}
3534
3535/**
3536 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
3537 *
3538 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3539 * This object represents an arbitrary integer used to uniquely identify a
3540 * particular attached remote initiator port to a particular SCSI target port
3541 * within a particular SCSI target device within a particular SCSI instance.
3542 */
3543static u32 srpt_sess_get_index(struct se_session *se_sess)
3544{
3545 return 0;
3546}
3547
3548static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3549{
3550}
3551
3552static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
3553{
3554 struct srpt_send_ioctx *ioctx;
3555
3556 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3557 return ioctx->tag;
3558}
3559
3560/* Note: only used from inside debug printk's by the TCM core. */
3561static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3562{
3563 struct srpt_send_ioctx *ioctx;
3564
3565 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3566 return srpt_get_cmd_state(ioctx);
3567}
3568
3569static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
3570{
3571 return 0;
3572}
3573
3574static u16 srpt_get_fabric_sense_len(void)
3575{
3576 return 0;
3577}
3578
3579static int srpt_is_state_remove(struct se_cmd *se_cmd)
3580{
3581 return 0;
3582}
3583
3584/**
3585 * srpt_parse_i_port_id() - Parse an initiator port ID.
3586 * @name: ASCII representation of a 128-bit initiator port ID.
3587 * @i_port_id: Binary 128-bit port ID.
3588 */
3589static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3590{
3591 const char *p;
3592 unsigned len, count, leading_zero_bytes;
3593 int ret, rc;
3594
3595 p = name;
3596 if (strnicmp(p, "0x", 2) == 0)
3597 p += 2;
3598 ret = -EINVAL;
3599 len = strlen(p);
3600 if (len % 2)
3601 goto out;
3602 count = min(len / 2, 16U);
3603 leading_zero_bytes = 16 - count;
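	/*
	 * Example: for name "0x1234", count = 2 and leading_zero_bytes = 14,
	 * so the two decoded bytes land at the end of the 16-byte i_port_id.
	 */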
3604 memset(i_port_id, 0, leading_zero_bytes);
3605	rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
3606	if (rc < 0)
3607		goto out; /* invalid hex digit; keep ret == -EINVAL */
3608	ret = 0;
3609out:
3610 return ret;
3611}
3612
3613/*
3614 * configfs callback function invoked for
3615 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3616 */
3617static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
3618 struct config_group *group,
3619 const char *name)
3620{
3621 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3622 struct se_node_acl *se_nacl, *se_nacl_new;
3623 struct srpt_node_acl *nacl;
3624 int ret = 0;
3625 u32 nexus_depth = 1;
3626 u8 i_port_id[16];
3627
3628 if (srpt_parse_i_port_id(i_port_id, name) < 0) {
3629 printk(KERN_ERR "invalid initiator port ID %s\n", name);
3630 ret = -EINVAL;
3631 goto err;
3632 }
3633
3634 se_nacl_new = srpt_alloc_fabric_acl(tpg);
3635 if (!se_nacl_new) {
3636 ret = -ENOMEM;
3637 goto err;
3638 }
3639 /*
3640	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
3641	 * when converting a node ACL from demo mode to explicit.
3642 */
3643 se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
3644 nexus_depth);
3645 if (IS_ERR(se_nacl)) {
3646 ret = PTR_ERR(se_nacl);
3647 goto err;
3648 }
3649	/* Locate our struct srpt_node_acl and set sport and i_port_id. */
3650 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3651 memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
3652 nacl->sport = sport;
3653
3654 spin_lock_irq(&sport->port_acl_lock);
3655 list_add_tail(&nacl->list, &sport->port_acl_list);
3656 spin_unlock_irq(&sport->port_acl_lock);
3657
3658 return se_nacl;
3659err:
3660 return ERR_PTR(ret);
3661}
3662
3663/*
3664 * configfs callback function invoked for
3665 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3666 */
3667static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
3668{
3669 struct srpt_node_acl *nacl;
3670 struct srpt_device *sdev;
3671 struct srpt_port *sport;
3672
3673 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3674 sport = nacl->sport;
3675 sdev = sport->sdev;
3676 spin_lock_irq(&sport->port_acl_lock);
3677 list_del(&nacl->list);
3678 spin_unlock_irq(&sport->port_acl_lock);
3679 core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
3680 srpt_release_fabric_acl(NULL, se_nacl);
3681}
3682
3683static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
3684 struct se_portal_group *se_tpg,
3685 char *page)
3686{
3687 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3688
3689 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3690}
3691
3692static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
3693 struct se_portal_group *se_tpg,
3694 const char *page,
3695 size_t count)
3696{
3697 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3698 unsigned long val;
3699 int ret;
3700
3701 ret = strict_strtoul(page, 0, &val);
3702 if (ret < 0) {
3703 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3704 return -EINVAL;
3705 }
3706 if (val > MAX_SRPT_RDMA_SIZE) {
3707 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3708 MAX_SRPT_RDMA_SIZE);
3709 return -EINVAL;
3710 }
3711 if (val < DEFAULT_MAX_RDMA_SIZE) {
3712 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3713 val, DEFAULT_MAX_RDMA_SIZE);
3714 return -EINVAL;
3715 }
3716 sport->port_attrib.srp_max_rdma_size = val;
3717
3718 return count;
3719}
3720
3721TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
3722
3723static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
3724 struct se_portal_group *se_tpg,
3725 char *page)
3726{
3727 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3728
3729 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3730}
3731
3732static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
3733 struct se_portal_group *se_tpg,
3734 const char *page,
3735 size_t count)
3736{
3737 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3738 unsigned long val;
3739 int ret;
3740
3741 ret = strict_strtoul(page, 0, &val);
3742 if (ret < 0) {
3743 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3744 return -EINVAL;
3745 }
3746 if (val > MAX_SRPT_RSP_SIZE) {
3747 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3748 MAX_SRPT_RSP_SIZE);
3749 return -EINVAL;
3750 }
3751 if (val < MIN_MAX_RSP_SIZE) {
3752 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3753 MIN_MAX_RSP_SIZE);
3754 return -EINVAL;
3755 }
3756 sport->port_attrib.srp_max_rsp_size = val;
3757
3758 return count;
3759}
3760
3761TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
3762
3763static ssize_t srpt_tpg_attrib_show_srp_sq_size(
3764 struct se_portal_group *se_tpg,
3765 char *page)
3766{
3767 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3768
3769 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3770}
3771
3772static ssize_t srpt_tpg_attrib_store_srp_sq_size(
3773 struct se_portal_group *se_tpg,
3774 const char *page,
3775 size_t count)
3776{
3777 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3778 unsigned long val;
3779 int ret;
3780
3781 ret = strict_strtoul(page, 0, &val);
3782 if (ret < 0) {
3783 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3784 return -EINVAL;
3785 }
3786 if (val > MAX_SRPT_SRQ_SIZE) {
3787 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3788 MAX_SRPT_SRQ_SIZE);
3789 return -EINVAL;
3790 }
3791 if (val < MIN_SRPT_SRQ_SIZE) {
3792 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3793 MIN_SRPT_SRQ_SIZE);
3794 return -EINVAL;
3795 }
3796 sport->port_attrib.srp_sq_size = val;
3797
3798 return count;
3799}
3800
3801TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
3802
3803static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3804 &srpt_tpg_attrib_srp_max_rdma_size.attr,
3805 &srpt_tpg_attrib_srp_max_rsp_size.attr,
3806 &srpt_tpg_attrib_srp_sq_size.attr,
3807 NULL,
3808};
3809
3810static ssize_t srpt_tpg_show_enable(
3811 struct se_portal_group *se_tpg,
3812 char *page)
3813{
3814 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3815
3816	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
3817}
3818
3819static ssize_t srpt_tpg_store_enable(
3820 struct se_portal_group *se_tpg,
3821 const char *page,
3822 size_t count)
3823{
3824 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3825 unsigned long tmp;
3826 int ret;
3827
3828 ret = strict_strtoul(page, 0, &tmp);
3829 if (ret < 0) {
3830 printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
3831 return -EINVAL;
3832 }
3833
3834 if ((tmp != 0) && (tmp != 1)) {
3835 printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3836 return -EINVAL;
3837 }
3838 if (tmp == 1)
3839 sport->enabled = true;
3840 else
3841 sport->enabled = false;
3842
3843 return count;
3844}
3845
3846TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
3847
3848static struct configfs_attribute *srpt_tpg_attrs[] = {
3849 &srpt_tpg_enable.attr,
3850 NULL,
3851};
3852
3853/**
3854 * configfs callback invoked for
3855 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3856 */
3857static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3858 struct config_group *group,
3859 const char *name)
3860{
3861 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3862 int res;
3863
3864 /* Initialize sport->port_wwn and sport->port_tpg_1 */
3865 res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
3866 &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
3867 if (res)
3868 return ERR_PTR(res);
3869
3870 return &sport->port_tpg_1;
3871}
3872
3873/**
3874 * configfs callback invoked for
3875 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3876 */
3877static void srpt_drop_tpg(struct se_portal_group *tpg)
3878{
3879 struct srpt_port *sport = container_of(tpg,
3880 struct srpt_port, port_tpg_1);
3881
3882 sport->enabled = false;
3883 core_tpg_deregister(&sport->port_tpg_1);
3884}
3885
3886/**
3887 * configfs callback invoked for
3888 * mkdir /sys/kernel/config/target/$driver/$port
3889 */
3890static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3891 struct config_group *group,
3892 const char *name)
3893{
3894 struct srpt_port *sport;
3895 int ret;
3896
3897 sport = srpt_lookup_port(name);
3898 pr_debug("make_tport(%s)\n", name);
3899 ret = -EINVAL;
3900 if (!sport)
3901 goto err;
3902
3903 return &sport->port_wwn;
3904
3905err:
3906 return ERR_PTR(ret);
3907}
3908
3909/**
3910 * configfs callback invoked for
3911 * rmdir /sys/kernel/config/target/$driver/$port
3912 */
3913static void srpt_drop_tport(struct se_wwn *wwn)
3914{
3915 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3916
3917 pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
3918}
3919
3920static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
3921 char *buf)
3922{
3923 return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3924}
3925
3926TF_WWN_ATTR_RO(srpt, version);
3927
3928static struct configfs_attribute *srpt_wwn_attrs[] = {
3929 &srpt_wwn_version.attr,
3930 NULL,
3931};
3932
3933static struct target_core_fabric_ops srpt_template = {
3934 .get_fabric_name = srpt_get_fabric_name,
3935 .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
3936 .tpg_get_wwn = srpt_get_fabric_wwn,
3937 .tpg_get_tag = srpt_get_tag,
3938 .tpg_get_default_depth = srpt_get_default_depth,
3939 .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
3940 .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
3941 .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
3942 .tpg_check_demo_mode = srpt_check_false,
3943 .tpg_check_demo_mode_cache = srpt_check_true,
3944 .tpg_check_demo_mode_write_protect = srpt_check_true,
3945 .tpg_check_prod_mode_write_protect = srpt_check_false,
3946 .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
3947 .tpg_release_fabric_acl = srpt_release_fabric_acl,
3948 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3949 .release_cmd = srpt_release_cmd,
3950 .check_stop_free = srpt_check_stop_free,
3951 .shutdown_session = srpt_shutdown_session,
3952 .close_session = srpt_close_session,
3953 .stop_session = srpt_stop_session,
3954 .fall_back_to_erl0 = srpt_reset_nexus,
3955 .sess_logged_in = srpt_sess_logged_in,
3956 .sess_get_index = srpt_sess_get_index,
3957 .sess_get_initiator_sid = NULL,
3958 .write_pending = srpt_write_pending,
3959 .write_pending_status = srpt_write_pending_status,
3960 .set_default_node_attributes = srpt_set_default_node_attrs,
3961 .get_task_tag = srpt_get_task_tag,
3962 .get_cmd_state = srpt_get_tcm_cmd_state,
3963 .queue_data_in = srpt_queue_response,
3964 .queue_status = srpt_queue_status,
3965 .queue_tm_rsp = srpt_queue_response,
3966 .get_fabric_sense_len = srpt_get_fabric_sense_len,
3967 .set_fabric_sense_len = srpt_set_fabric_sense_len,
3968 .is_state_remove = srpt_is_state_remove,
3969 /*
3970 * Setup function pointers for generic logic in
3971 * target_core_fabric_configfs.c
3972 */
3973 .fabric_make_wwn = srpt_make_tport,
3974 .fabric_drop_wwn = srpt_drop_tport,
3975 .fabric_make_tpg = srpt_make_tpg,
3976 .fabric_drop_tpg = srpt_drop_tpg,
3977 .fabric_post_link = NULL,
3978 .fabric_pre_unlink = NULL,
3979 .fabric_make_np = NULL,
3980 .fabric_drop_np = NULL,
3981 .fabric_make_nodeacl = srpt_make_nodeacl,
3982 .fabric_drop_nodeacl = srpt_drop_nodeacl,
3983};
3984
3985/**
3986 * srpt_init_module() - Kernel module initialization.
3987 *
3988 * Note: Since ib_register_client() registers callback functions, and since at
3989 * least one of these callback functions (srpt_add_one()) calls target core
3990 * functions, this driver must be registered with the target core before
3991 * ib_register_client() is called.
3992 */
3993static int __init srpt_init_module(void)
3994{
3995 int ret;
3996
3997 ret = -EINVAL;
3998 if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3999 printk(KERN_ERR "invalid value %d for kernel module parameter"
4000 " srp_max_req_size -- must be at least %d.\n",
4001 srp_max_req_size, MIN_MAX_REQ_SIZE);
4002 goto out;
4003 }
4004
4005 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
4006 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
4007 printk(KERN_ERR "invalid value %d for kernel module parameter"
4008 " srpt_srq_size -- must be in the range [%d..%d].\n",
4009 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
4010 goto out;
4011 }
4012
4013 spin_lock_init(&srpt_dev_lock);
4014 INIT_LIST_HEAD(&srpt_dev_list);
4015
4016 ret = -ENODEV;
4017 srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
4018 if (!srpt_target) {
4019 printk(KERN_ERR "couldn't register\n");
4020 goto out;
4021 }
4022
4023 srpt_target->tf_ops = srpt_template;
4024
4025 /* Enable SG chaining */
4026 srpt_target->tf_ops.task_sg_chaining = true;
4027
4028 /*
4029 * Set up default attribute lists.
4030 */
4031 srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
4032 srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
4033 srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
4034 srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
4035 srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
4036 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
4037 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
4038 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
4039 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
4040
4041 ret = target_fabric_configfs_register(srpt_target);
4042 if (ret < 0) {
4043 printk(KERN_ERR "couldn't register\n");
4044 goto out_free_target;
4045 }
4046
4047 ret = ib_register_client(&srpt_client);
4048 if (ret) {
4049 printk(KERN_ERR "couldn't register IB client\n");
4050 goto out_unregister_target;
4051 }
4052
4053 return 0;
4054
4055out_unregister_target:
4056 target_fabric_configfs_deregister(srpt_target);
4057 srpt_target = NULL;
4058out_free_target:
4059 if (srpt_target)
4060 target_fabric_configfs_free(srpt_target);
4061out:
4062 return ret;
4063}
4064
4065static void __exit srpt_cleanup_module(void)
4066{
4067 ib_unregister_client(&srpt_client);
4068 target_fabric_configfs_deregister(srpt_target);
4069 srpt_target = NULL;
4070}
4071
4072module_init(srpt_init_module);
4073module_exit(srpt_cleanup_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644
index 00000000000..b4b4bbcd7f1
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -0,0 +1,444 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#ifndef IB_SRPT_H
36#define IB_SRPT_H
37
38#include <linux/version.h>
39#include <linux/types.h>
40#include <linux/list.h>
41#include <linux/wait.h>
42
43#include <rdma/ib_verbs.h>
44#include <rdma/ib_sa.h>
45#include <rdma/ib_cm.h>
46
47#include <scsi/srp.h>
48
49#include "ib_dm_mad.h"
50
51/*
52 * The prefix the ServiceName field must start with in the device management
53 * ServiceEntries attribute pair. See also the SRP specification.
54 */
55#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
56
57enum {
58 /*
59 * SRP IOControllerProfile attributes for SRP target ports that have
60 * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
61 * in the SRP specification.
62 */
63 SRP_PROTOCOL = 0x0108,
64 SRP_PROTOCOL_VERSION = 0x0001,
65 SRP_IO_SUBCLASS = 0x609e,
66 SRP_SEND_TO_IOC = 0x01,
67 SRP_SEND_FROM_IOC = 0x02,
68 SRP_RDMA_READ_FROM_IOC = 0x08,
69 SRP_RDMA_WRITE_FROM_IOC = 0x20,
70
71 /*
72 * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
73 * specification.
74 */
75 SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
76 SRP_LOSOLNT = 0x10, /* logout solicited notification */
77 SRP_CRSOLNT = 0x20, /* credit request solicited notification */
78 SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
79
80 /*
81 * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
82 * 18 and 20 in the SRP specification.
83 */
84 SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
85 SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
86
87 /*
88 * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
89 * 16 and 22 in the SRP specification.
90 */
91 SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
92
93 /* See also table 24 in the SRP specification. */
94 SRP_TSK_MGMT_SUCCESS = 0x00,
95 SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
96 SRP_TSK_MGMT_FAILED = 0x05,
97
98 /* See also table 21 in the SRP specification. */
99 SRP_CMD_SIMPLE_Q = 0x0,
100 SRP_CMD_HEAD_OF_Q = 0x1,
101 SRP_CMD_ORDERED_Q = 0x2,
102 SRP_CMD_ACA = 0x4,
103
104 SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
105 SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
106 SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
107
108 SRPT_DEF_SG_TABLESIZE = 128,
109 SRPT_DEF_SG_PER_WQE = 16,
110
111 MIN_SRPT_SQ_SIZE = 16,
112 DEF_SRPT_SQ_SIZE = 4096,
113 SRPT_RQ_SIZE = 128,
114 MIN_SRPT_SRQ_SIZE = 4,
115 DEFAULT_SRPT_SRQ_SIZE = 4095,
116 MAX_SRPT_SRQ_SIZE = 65535,
117 MAX_SRPT_RDMA_SIZE = 1U << 24,
118 MAX_SRPT_RSP_SIZE = 1024,
119
120 MIN_MAX_REQ_SIZE = 996,
121 DEFAULT_MAX_REQ_SIZE
122 = sizeof(struct srp_cmd)/*48*/
123 + sizeof(struct srp_indirect_buf)/*20*/
124 + 128 * sizeof(struct srp_direct_buf)/*16*/,
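		  /* i.e. 48 + 20 + 128 * 16 = 2116 bytes, per the sizes noted above */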
125
126 MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
127 DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
128
129 DEFAULT_MAX_RDMA_SIZE = 65536,
130};
131
132enum srpt_opcode {
133 SRPT_RECV,
134 SRPT_SEND,
135 SRPT_RDMA_MID,
136 SRPT_RDMA_ABORT,
137 SRPT_RDMA_READ_LAST,
138 SRPT_RDMA_WRITE_LAST,
139};
140
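/*
 * A 64-bit IB work request ID carries both the operation type (enum
 * srpt_opcode, upper 32 bits) and the index of the I/O context in its ring
 * (lower 32 bits); the helpers below pack and unpack these two fields.
 */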
141static inline u64 encode_wr_id(u8 opcode, u32 idx)
142{
143 return ((u64)opcode << 32) | idx;
144}
145static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
146{
147 return wr_id >> 32;
148}
149static inline u32 idx_from_wr_id(u64 wr_id)
150{
151 return (u32)wr_id;
152}
153
154struct rdma_iu {
155 u64 raddr;
156 u32 rkey;
157 struct ib_sge *sge;
158 u32 sge_cnt;
159 int mem_id;
160};
161
162/**
163 * enum srpt_command_state - SCSI command state managed by SRPT.
164 * @SRPT_STATE_NEW: New command arrived and is being processed.
165 * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
166 * for data arrival.
167 * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
168 * being processed.
169 * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
170 * @SRPT_STATE_MGMT: Processing a SCSI task management command.
171 * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
172 * @SRPT_STATE_DONE: Command processing finished successfully, command
173 * processing has been aborted or command processing
174 * failed.
175 */
176enum srpt_command_state {
177 SRPT_STATE_NEW = 0,
178 SRPT_STATE_NEED_DATA = 1,
179 SRPT_STATE_DATA_IN = 2,
180 SRPT_STATE_CMD_RSP_SENT = 3,
181 SRPT_STATE_MGMT = 4,
182 SRPT_STATE_MGMT_RSP_SENT = 5,
183 SRPT_STATE_DONE = 6,
184};
185
186/**
187 * struct srpt_ioctx - Shared SRPT I/O context information.
188 * @buf: Pointer to the buffer.
189 * @dma: DMA address of the buffer.
190 * @index: Index of the I/O context in its ioctx_ring array.
191 */
192struct srpt_ioctx {
193 void *buf;
194 dma_addr_t dma;
195 uint32_t index;
196};
197
198/**
199 * struct srpt_recv_ioctx - SRPT receive I/O context.
200 * @ioctx: See above.
201 * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
202 */
203struct srpt_recv_ioctx {
204 struct srpt_ioctx ioctx;
205 struct list_head wait_list;
206};
207
208/**
209 * struct srpt_send_ioctx - SRPT send I/O context.
210 * @ioctx: See above.
211 * @ch: Channel pointer.
212 * @free_list: Node in srpt_rdma_ch.free_list.
213 * @n_rbuf: Number of data buffers in the received SRP command.
214 * @rbufs: Pointer to SRP data buffer array.
215 * @single_rbuf: SRP data buffer if the command has only a single buffer.
216 * @sg: Pointer to sg-list associated with this I/O context.
217 * @sg_cnt: SG-list size.
218 * @mapped_sg_count: ib_dma_map_sg() return value.
219 * @n_rdma_ius: Number of elements in the rdma_ius array.
220 * @rdma_ius: Array with information about the RDMA mapping.
221 * @tag: Tag of the received SRP information unit.
222 * @spinlock: Protects 'state'.
223 * @state: I/O context state.
224 * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
225 * the already initiated transfers have finished.
226 * @cmd: Target core command data structure.
227 * @sense_data: SCSI sense data.
228 */
229struct srpt_send_ioctx {
230 struct srpt_ioctx ioctx;
231 struct srpt_rdma_ch *ch;
232 struct kref kref;
233 struct rdma_iu *rdma_ius;
234 struct srp_direct_buf *rbufs;
235 struct srp_direct_buf single_rbuf;
236 struct scatterlist *sg;
237 struct list_head free_list;
238 spinlock_t spinlock;
239 enum srpt_command_state state;
240 bool rdma_aborted;
241 struct se_cmd cmd;
242 struct completion tx_done;
243 u64 tag;
244 int sg_cnt;
245 int mapped_sg_count;
246 u16 n_rdma_ius;
247 u8 n_rdma;
248 u8 n_rbuf;
249 bool queue_status_only;
250 u8 sense_data[SCSI_SENSE_BUFFERSIZE];
251};
252
253/**
254 * enum rdma_ch_state - SRP channel state.
255 * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
256 * @CH_LIVE: QP is in RTS state.
257 * @CH_DISCONNECTING: DREQ has been received and a DREP reply is pending,
258 *                    or DREQ has been sent and a DREP reply is awaited.
259 *
260 * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
261 * @CH_RELEASING: Last WQE event has been received; releasing resources.
262 */
263enum rdma_ch_state {
264 CH_CONNECTING,
265 CH_LIVE,
266 CH_DISCONNECTING,
267 CH_DRAINING,
268 CH_RELEASING
269};
270
271/**
272 * struct srpt_rdma_ch - RDMA channel.
273 * @wait_queue: Allows the kernel thread to wait for more work.
274 * @thread: Kernel thread that processes the IB queues associated with
275 * the channel.
276 * @cm_id: IB CM ID associated with the channel.
277 * @qp: IB queue pair used for communicating over this channel.
278 * @cq: IB completion queue for this channel.
279 * @rq_size: IB receive queue size.
280 * @rsp_size:      IB response message size in bytes.
281 * @sq_wr_avail: number of work requests available in the send queue.
282 * @sport: pointer to the information of the HCA port used by this
283 * channel.
284 * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
285 * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
286 * @max_ti_iu_len: maximum target-to-initiator information unit length.
287 * @req_lim: request limit: maximum number of requests that may be sent
288 * by the initiator without having received a response.
289 * @req_lim_delta: Number of credits not yet sent back to the initiator.
290 * @spinlock: Protects free_list and state.
291 * @free_list: Head of list with free send I/O contexts.
292 * @state: channel state. See also enum rdma_ch_state.
293 * @ioctx_ring: Send ring.
294 * @wc: IB work completion array for srpt_process_completion().
295 * @list: Node for insertion in the srpt_device.rch_list list.
296 * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
297 * list contains struct srpt_ioctx elements and is protected
298 * against concurrent modification by the cm_id spinlock.
299 * @sess: Session information associated with this SRP channel.
300 * @sess_name: Session name.
301 * @release_work: Allows scheduling of srpt_release_channel().
302 * @release_done: Enables waiting for srpt_release_channel() completion.
303 */
304struct srpt_rdma_ch {
305 wait_queue_head_t wait_queue;
306 struct task_struct *thread;
307 struct ib_cm_id *cm_id;
308 struct ib_qp *qp;
309 struct ib_cq *cq;
310 int rq_size;
311 u32 rsp_size;
312 atomic_t sq_wr_avail;
313 struct srpt_port *sport;
314 u8 i_port_id[16];
315 u8 t_port_id[16];
316 int max_ti_iu_len;
317 atomic_t req_lim;
318 atomic_t req_lim_delta;
319 spinlock_t spinlock;
320 struct list_head free_list;
321 enum rdma_ch_state state;
322 struct srpt_send_ioctx **ioctx_ring;
323 struct ib_wc wc[16];
324 struct list_head list;
325 struct list_head cmd_wait_list;
326 struct se_session *sess;
327 u8 sess_name[36];
328 struct work_struct release_work;
329 struct completion *release_done;
330};
331
332/**
333 * struct srpt_port_attrib - Attributes for an SRPT port.
334 * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
335 * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
336 * @srp_sq_size: Send queue (SQ) size used for new connections.
337 */
338struct srpt_port_attrib {
339 u32 srp_max_rdma_size;
340 u32 srp_max_rsp_size;
341 u32 srp_sq_size;
342};
343
344/**
345 * struct srpt_port - Information associated by SRPT with a single IB port.
346 * @sdev: backpointer to the HCA information.
347 * @mad_agent: per-port management datagram processing information.
348 * @enabled: Whether or not this target port is enabled.
349 * @port_guid: ASCII representation of Port GUID
350 * @port: one-based port number.
351 * @sm_lid: cached value of the port's sm_lid.
352 * @lid: cached value of the port's lid.
353 * @gid: cached value of the port's gid.
354 * @port_acl_lock: Spinlock protecting port_acl_list.
355 * @work: work structure for refreshing the aforementioned cached values.
356 * @port_tpg_1:    Target portal group = 1 data.
357 * @port_wwn: Target core WWN data.
358 * @port_acl_list: Head of the list with all node ACLs for this port.
359 */
360struct srpt_port {
361 struct srpt_device *sdev;
362 struct ib_mad_agent *mad_agent;
363 bool enabled;
364 u8 port_guid[64];
365 u8 port;
366 u16 sm_lid;
367 u16 lid;
368 union ib_gid gid;
369 spinlock_t port_acl_lock;
370 struct work_struct work;
371 struct se_portal_group port_tpg_1;
372 struct se_wwn port_wwn;
373 struct list_head port_acl_list;
374 struct srpt_port_attrib port_attrib;
375};
376
377/**
378 * struct srpt_device - Information associated by SRPT with a single HCA.
379 * @device: Backpointer to the struct ib_device managed by the IB core.
380 * @pd: IB protection domain.
381 * @mr: L_Key (local key) with write access to all local memory.
382 * @srq: Per-HCA SRQ (shared receive queue).
383 * @cm_id: Connection identifier.
384 * @dev_attr: Attributes of the InfiniBand device as obtained during the
385 * ib_client.add() callback.
386 * @srq_size: SRQ size.
387 * @ioctx_ring:    Per-HCA ring of receive I/O contexts posted on the SRQ.
388 * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
389 * @ch_releaseQ: Enables waiting for removal from rch_list.
390 * @spinlock: Protects rch_list and tpg.
391 * @port: Information about the ports owned by this HCA.
392 * @event_handler: Per-HCA asynchronous IB event handler.
393 * @list: Node in srpt_dev_list.
394 */
395struct srpt_device {
396 struct ib_device *device;
397 struct ib_pd *pd;
398 struct ib_mr *mr;
399 struct ib_srq *srq;
400 struct ib_cm_id *cm_id;
401 struct ib_device_attr dev_attr;
402 int srq_size;
403 struct srpt_recv_ioctx **ioctx_ring;
404 struct list_head rch_list;
405 wait_queue_head_t ch_releaseQ;
406 spinlock_t spinlock;
407 struct srpt_port port[2];
408 struct ib_event_handler event_handler;
409 struct list_head list;
410};
411
412/**
413 * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
414 * @i_port_id: 128-bit SRP initiator port ID.
415 * @sport: port information.
416 * @nacl: Target core node ACL information.
417 * @list: Element of the per-HCA ACL list.
418 */
419struct srpt_node_acl {
420 u8 i_port_id[16];
421 struct srpt_port *sport;
422 struct se_node_acl nacl;
423 struct list_head list;
424};
425
426/*
427 * SRP-related SCSI persistent reservation definitions.
428 *
429 * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
430 * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
431 * SCSI over an RDMA interface).
432 */
433
434enum {
435 SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
436};
437
438struct spc_rdma_transport_id {
439 uint8_t protocol_identifier;
440 uint8_t reserved[7];
441 uint8_t i_port_id[16];
442};
443
444#endif /* IB_SRPT_H */
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 27555995f7e..b5ee3ebfcfc 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -24,6 +24,21 @@
24#include <linux/dvb/frontend.h> 24#include <linux/dvb/frontend.h>
25#include "dvb_frontend.h" 25#include "dvb_frontend.h"
26 26
27/* Registers (Write-only) */
28#define XREG_INIT 0x00
29#define XREG_RF_FREQ 0x02
30#define XREG_POWER_DOWN 0x08
31
32/* Registers (Read-only) */
33#define XREG_FREQ_ERROR 0x01
34#define XREG_LOCK 0x02
35#define XREG_VERSION 0x04
36#define XREG_PRODUCT_ID 0x08
37#define XREG_HSYNC_FREQ 0x10
38#define XREG_FRAME_LINES 0x20
39#define XREG_SNR 0x40
40
41#define XREG_ADC_ENV 0x0100
27 42
28static int debug; 43static int debug;
29module_param(debug, int, 0644); 44module_param(debug, int, 0644);
@@ -885,7 +900,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
885 mutex_lock(&priv->lock); 900 mutex_lock(&priv->lock);
886 901
887 /* Sync Lock Indicator */ 902 /* Sync Lock Indicator */
888 rc = xc2028_get_reg(priv, 0x0002, &frq_lock); 903 rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
889 if (rc < 0) 904 if (rc < 0)
890 goto ret; 905 goto ret;
891 906
@@ -894,7 +909,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
894 signal = 1 << 11; 909 signal = 1 << 11;
895 910
896 /* Get SNR of the video signal */ 911 /* Get SNR of the video signal */
897 rc = xc2028_get_reg(priv, 0x0040, &signal); 912 rc = xc2028_get_reg(priv, XREG_SNR, &signal);
898 if (rc < 0) 913 if (rc < 0)
899 goto ret; 914 goto ret;
900 915
@@ -1019,9 +1034,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
1019 1034
1020 /* CMD= Set frequency */ 1035 /* CMD= Set frequency */
1021 if (priv->firm_version < 0x0202) 1036 if (priv->firm_version < 0x0202)
1022 rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00}); 1037 rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
1023 else 1038 else
1024 rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00}); 1039 rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
1025 if (rc < 0) 1040 if (rc < 0)
1026 goto ret; 1041 goto ret;
1027 1042
@@ -1201,9 +1216,9 @@ static int xc2028_sleep(struct dvb_frontend *fe)
1201 mutex_lock(&priv->lock); 1216 mutex_lock(&priv->lock);
1202 1217
1203 if (priv->firm_version < 0x0202) 1218 if (priv->firm_version < 0x0202)
1204 rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00}); 1219 rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
1205 else 1220 else
1206 rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00}); 1221 rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
1207 1222
1208 priv->cur_fw.type = 0; /* need firmware reload */ 1223 priv->cur_fw.type = 0; /* need firmware reload */
1209 1224
diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c
index d218c1d68c3..68397110b7d 100644
--- a/drivers/media/common/tuners/xc4000.c
+++ b/drivers/media/common/tuners/xc4000.c
@@ -154,6 +154,8 @@ struct xc4000_priv {
154#define XREG_SNR 0x06 154#define XREG_SNR 0x06
155#define XREG_VERSION 0x07 155#define XREG_VERSION 0x07
156#define XREG_PRODUCT_ID 0x08 156#define XREG_PRODUCT_ID 0x08
157#define XREG_SIGNAL_LEVEL 0x0A
158#define XREG_NOISE_LEVEL 0x0B
157 159
158/* 160/*
159 Basic firmware description. This will remain with 161 Basic firmware description. This will remain with
@@ -486,6 +488,16 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
486 return xc4000_readreg(priv, XREG_QUALITY, quality); 488 return xc4000_readreg(priv, XREG_QUALITY, quality);
487} 489}
488 490
491static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
492{
493 return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
494}
495
496static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
497{
498 return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
499}
500
489static u16 xc_wait_for_lock(struct xc4000_priv *priv) 501static u16 xc_wait_for_lock(struct xc4000_priv *priv)
490{ 502{
491 u16 lock_state = 0; 503 u16 lock_state = 0;
@@ -1089,6 +1101,8 @@ static void xc_debug_dump(struct xc4000_priv *priv)
1089 u32 hsync_freq_hz = 0; 1101 u32 hsync_freq_hz = 0;
1090 u16 frame_lines; 1102 u16 frame_lines;
1091 u16 quality; 1103 u16 quality;
1104 u16 signal = 0;
1105 u16 noise = 0;
1092 u8 hw_majorversion = 0, hw_minorversion = 0; 1106 u8 hw_majorversion = 0, hw_minorversion = 0;
1093 u8 fw_majorversion = 0, fw_minorversion = 0; 1107 u8 fw_majorversion = 0, fw_minorversion = 0;
1094 1108
@@ -1119,6 +1133,12 @@ static void xc_debug_dump(struct xc4000_priv *priv)
1119 1133
1120 xc_get_quality(priv, &quality); 1134 xc_get_quality(priv, &quality);
1121 dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality); 1135 dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
1136
1137 xc_get_signal_level(priv, &signal);
1138 dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
1139
1140 xc_get_noise_level(priv, &noise);
1141 dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
1122} 1142}
1123 1143
1124static int xc4000_set_params(struct dvb_frontend *fe) 1144static int xc4000_set_params(struct dvb_frontend *fe)
@@ -1432,6 +1452,71 @@ fail:
1432 return ret; 1452 return ret;
1433} 1453}
1434 1454
1455static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
1456{
1457 struct xc4000_priv *priv = fe->tuner_priv;
1458 u16 value = 0;
1459 int rc;
1460
1461 mutex_lock(&priv->lock);
1462 rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
1463 mutex_unlock(&priv->lock);
1464
1465 if (rc < 0)
1466 goto ret;
1467
1468	/* Information from real testing of the DVB-T and radio parts;
1469	   the coefficient for one dB is 0xff.
1470	 */
1471 tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
1472
1473 /* all known digital modes */
1474 if ((priv->video_standard == XC4000_DTV6) ||
1475 (priv->video_standard == XC4000_DTV7) ||
1476 (priv->video_standard == XC4000_DTV7_8) ||
1477 (priv->video_standard == XC4000_DTV8))
1478 goto digital;
1479
1480	/* In analog mode the NOISE LEVEL is what matters; the
1481	   signal level depends only on the gain of the antenna
1482	   and amplifiers and says nothing about the real
1483	   quality of reception.
1484	 */
1485 mutex_lock(&priv->lock);
1486 rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
1487 mutex_unlock(&priv->lock);
1488
1489 tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
1490
1491 /* highest noise level: 32dB */
1492 if (value >= 0x2000) {
1493 value = 0;
1494 } else {
1495 value = ~value << 3;
1496 }
1497
1498 goto ret;
1499
1500	/* In digital mode the SIGNAL LEVEL is what matters; the real
1501	   noise level is stored in demodulator registers.
1502	 */
1503digital:
1504 /* best signal: -50dB */
1505 if (value <= 0x3200) {
1506 value = 0xffff;
1507 /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
1508 } else if (value >= 0x713A) {
1509 value = 0;
1510 } else {
1511 value = ~(value - 0x3200) << 2;
1512 }
1513
1514ret:
1515 *strength = value;
1516
1517 return rc;
1518}
1519
1435static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) 1520static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
1436{ 1521{
1437 struct xc4000_priv *priv = fe->tuner_priv; 1522 struct xc4000_priv *priv = fe->tuner_priv;
@@ -1559,6 +1644,7 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = {
1559 .set_params = xc4000_set_params, 1644 .set_params = xc4000_set_params,
1560 .set_analog_params = xc4000_set_analog_params, 1645 .set_analog_params = xc4000_set_analog_params,
1561 .get_frequency = xc4000_get_frequency, 1646 .get_frequency = xc4000_get_frequency,
1647 .get_rf_strength = xc4000_get_signal,
1562 .get_bandwidth = xc4000_get_bandwidth, 1648 .get_bandwidth = xc4000_get_bandwidth,
1563 .get_status = xc4000_get_status 1649 .get_status = xc4000_get_status
1564}; 1650};
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index b15db4fe347..fbbe545a74c 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,8 +904,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
904{ 904{
905 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 905 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
906 int i; 906 int i;
907 u32 delsys;
907 908
909 delsys = c->delivery_system;
908 memset(c, 0, sizeof(struct dtv_frontend_properties)); 910 memset(c, 0, sizeof(struct dtv_frontend_properties));
911 c->delivery_system = delsys;
909 912
910 c->state = DTV_CLEAR; 913 c->state = DTV_CLEAR;
911 914
@@ -1009,25 +1012,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
1009 _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0), 1012 _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
1010 _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0), 1013 _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
1011 1014
1012 _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
1013 _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
1014 _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
1015 _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
1016 _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
1017 _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
1018 _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
1019 _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
1020 _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
1021 _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
1022 _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
1023 _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
1024 _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
1025 _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
1026 _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
1027 _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
1028 _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
1029 _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
1030
1031 _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0), 1015 _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
1032 _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0), 1016 _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
1033 1017
@@ -1413,6 +1397,15 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
1413 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 1397 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
1414 enum dvbv3_emulation_type type; 1398 enum dvbv3_emulation_type type;
1415 1399
1400 /*
1401 * It was reported that some old DVBv5 applications were
1402 * filling delivery_system with SYS_UNDEFINED. If this happens,
1403 * assume that the application wants to use the first supported
1404 * delivery system.
1405 */
1406 if (c->delivery_system == SYS_UNDEFINED)
1407 c->delivery_system = fe->ops.delsys[0];
1408
1416 if (desired_system == SYS_UNDEFINED) { 1409 if (desired_system == SYS_UNDEFINED) {
1417 /* 1410 /*
1418 * A DVBv3 call doesn't know what's the desired system. 1411 * A DVBv3 call doesn't know what's the desired system.
@@ -1732,6 +1725,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
1732{ 1725{
1733 struct dvb_device *dvbdev = file->private_data; 1726 struct dvb_device *dvbdev = file->private_data;
1734 struct dvb_frontend *fe = dvbdev->priv; 1727 struct dvb_frontend *fe = dvbdev->priv;
1728 struct dvb_frontend_private *fepriv = fe->frontend_priv;
1735 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 1729 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
1736 int err = 0; 1730 int err = 0;
1737 1731
@@ -1798,9 +1792,14 @@ static int dvb_frontend_ioctl_properties(struct file *file,
1798 1792
1799 /* 1793 /*
1800 * Fills the cache out struct with the cache contents, plus 1794 * Fills the cache out struct with the cache contents, plus
1801 * the data retrieved from get_frontend. 1795 * the data retrieved from get_frontend, if the frontend
1796 * is not idle. Otherwise, returns the cached content
1802 */ 1797 */
1803 dtv_get_frontend(fe, NULL); 1798 if (fepriv->state != FESTATE_IDLE) {
1799 err = dtv_get_frontend(fe, NULL);
1800 if (err < 0)
1801 goto out;
1802 }
1804 for (i = 0; i < tvps->num; i++) { 1803 for (i = 0; i < tvps->num; i++) {
1805 err = dtv_property_process_get(fe, c, tvp + i, file); 1804 err = dtv_property_process_get(fe, c, tvp + i, file);
1806 if (err < 0) 1805 if (err < 0)
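
The reworked ioctl path above only queries the hardware when the frontend is active; an idle frontend serves cached properties, and a failed hardware read now aborts the ioctl instead of being silently ignored. The control flow in isolation (stubs stand in for the real driver calls):

    enum { FESTATE_IDLE = 1 };

    struct fe { int state; };

    static int get_from_hw(struct fe *fe) { (void)fe; return 0; }	/* stub */
    static int copy_cached(struct fe *fe) { (void)fe; return 0; }	/* stub */

    static int get_properties(struct fe *fe)
    {
    	if (fe->state != FESTATE_IDLE) {
    		int err = get_from_hw(fe);	/* refresh the cache */
    		if (err < 0)
    			return err;		/* no longer ignored */
    	}
    	return copy_cached(fe);			/* idle: cache only */
    }
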
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index d66192974d6..1455e2644ab 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -877,24 +877,18 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
877 case ANYSEE_HW_508T2C: /* 20 */ 877 case ANYSEE_HW_508T2C: /* 20 */
878 /* E7 T2C */ 878 /* E7 T2C */
879 879
880 if (state->fe_id)
881 break;
882
880 /* enable DVB-T/T2/C demod on IOE[5] */ 883 /* enable DVB-T/T2/C demod on IOE[5] */
881 ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20); 884 ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
882 if (ret) 885 if (ret)
883 goto error; 886 goto error;
884 887
885 if (state->fe_id == 0) { 888 /* attach demod */
886 /* DVB-T/T2 */ 889 adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
887 adap->fe_adap[state->fe_id].fe = 890 &anysee_cxd2820r_config, &adap->dev->i2c_adap,
888 dvb_attach(cxd2820r_attach, 891 NULL);
889 &anysee_cxd2820r_config,
890 &adap->dev->i2c_adap, NULL);
891 } else {
892 /* DVB-C */
893 adap->fe_adap[state->fe_id].fe =
894 dvb_attach(cxd2820r_attach,
895 &anysee_cxd2820r_config,
896 &adap->dev->i2c_adap, adap->fe_adap[0].fe);
897 }
898 892
899 state->has_ci = true; 893 state->has_ci = true;
900 894
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 9bd6d51b3b9..7de125c0b36 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -48,6 +48,8 @@ struct dib0700_state {
48 u8 disable_streaming_master_mode; 48 u8 disable_streaming_master_mode;
49 u32 fw_version; 49 u32 fw_version;
50 u32 nb_packet_buffer_size; 50 u32 nb_packet_buffer_size;
51 int (*read_status)(struct dvb_frontend *, fe_status_t *);
 52 int (*sleep)(struct dvb_frontend *fe);
51 u8 buf[255]; 53 u8 buf[255];
52}; 54};
53 55
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 206999476f0..070e82aa53f 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -834,6 +834,7 @@ static struct usb_driver dib0700_driver = {
834 834
835module_usb_driver(dib0700_driver); 835module_usb_driver(dib0700_driver);
836 836
837MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
837MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); 838MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
838MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge"); 839MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
839MODULE_VERSION("1.0"); 840MODULE_VERSION("1.0");
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 81ef4b46f79..f9e966aa26e 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -3066,19 +3066,25 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
3066 } 3066 }
3067}; 3067};
3068 3068
3069static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) 3069static void stk7070pd_init(struct dvb_usb_device *dev)
3070{ 3070{
3071 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); 3071 dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
3072 msleep(10); 3072 msleep(10);
3073 dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); 3073 dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
3074 dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); 3074 dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
3075 dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); 3075 dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
3076 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); 3076 dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
3077 3077
3078 dib0700_ctrl_clock(adap->dev, 72, 1); 3078 dib0700_ctrl_clock(dev, 72, 1);
3079 3079
3080 msleep(10); 3080 msleep(10);
3081 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); 3081 dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
3082}
3083
3084static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
3085{
3086 stk7070pd_init(adap->dev);
3087
3082 msleep(10); 3088 msleep(10);
3083 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); 3089 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
3084 3090
@@ -3099,6 +3105,77 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
3099 return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; 3105 return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
3100} 3106}
3101 3107
3108static int novatd_read_status_override(struct dvb_frontend *fe,
3109 fe_status_t *stat)
3110{
3111 struct dvb_usb_adapter *adap = fe->dvb->priv;
3112 struct dvb_usb_device *dev = adap->dev;
3113 struct dib0700_state *state = dev->priv;
3114 int ret;
3115
3116 ret = state->read_status(fe, stat);
3117
3118 if (!ret)
3119 dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
3120 !!(*stat & FE_HAS_LOCK));
3121
3122 return ret;
3123}
3124
3125static int novatd_sleep_override(struct dvb_frontend *fe)
3126{
3127 struct dvb_usb_adapter *adap = fe->dvb->priv;
3128 struct dvb_usb_device *dev = adap->dev;
3129 struct dib0700_state *state = dev->priv;
3130
3131 /* turn off LED */
3132 dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
3133
3134 return state->sleep(fe);
3135}
3136
3137/**
3138 * novatd_frontend_attach - Nova-TD specific attach
3139 *
3140 * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
3141 * information purposes.
3142 */
3143static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
3144{
3145 struct dvb_usb_device *dev = adap->dev;
3146 struct dib0700_state *st = dev->priv;
3147
3148 if (adap->id == 0) {
3149 stk7070pd_init(dev);
3150
3151 /* turn the power LED on, the other two off (just in case) */
3152 dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
3153 dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
3154 dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
3155
3156 if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
3157 stk7070pd_dib7000p_config) != 0) {
3158 err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
3159 __func__);
3160 return -ENODEV;
3161 }
3162 }
3163
3164 adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
3165 adap->id == 0 ? 0x80 : 0x82,
3166 &stk7070pd_dib7000p_config[adap->id]);
3167
3168 if (adap->fe_adap[0].fe == NULL)
3169 return -ENODEV;
3170
3171 st->read_status = adap->fe_adap[0].fe->ops.read_status;
3172 adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
3173 st->sleep = adap->fe_adap[0].fe->ops.sleep;
3174 adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
3175
3176 return 0;
3177}
3178
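
The two overrides above follow a save-and-wrap idiom: stash the frontend's original callback in device state, install a wrapper in its place, and chain to the saved pointer from inside the wrapper (here to drive a lock LED). Stripped to its skeleton, with types reduced for illustration:

    struct frontend;
    typedef int (*status_fn)(struct frontend *fe, unsigned int *stat);

    struct frontend { status_fn read_status; };

    static status_fn saved_read_status;	/* kept in dib0700_state above */

    static int read_status_with_led(struct frontend *fe, unsigned int *stat)
    {
    	int ret = saved_read_status(fe, stat);	/* chain to the original */

    	if (!ret) {
    		/* update the lock LED from *stat here */
    	}
    	return ret;
    }

    static void install_override(struct frontend *fe)
    {
    	saved_read_status = fe->read_status;	/* save */
    	fe->read_status = read_status_with_led;	/* wrap */
    }
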
3102/* S5H1411 */ 3179/* S5H1411 */
3103static struct s5h1411_config pinnacle_801e_config = { 3180static struct s5h1411_config pinnacle_801e_config = {
3104 .output_mode = S5H1411_PARALLEL_OUTPUT, 3181 .output_mode = S5H1411_PARALLEL_OUTPUT,
@@ -3870,6 +3947,57 @@ struct dvb_usb_device_properties dib0700_devices[] = {
3870 .pid_filter_count = 32, 3947 .pid_filter_count = 32,
3871 .pid_filter = stk70x0p_pid_filter, 3948 .pid_filter = stk70x0p_pid_filter,
3872 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, 3949 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
3950 .frontend_attach = novatd_frontend_attach,
3951 .tuner_attach = dib7070p_tuner_attach,
3952
3953 DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
3954 }},
3955 .size_of_priv = sizeof(struct dib0700_adapter_state),
3956 }, {
3957 .num_frontends = 1,
3958 .fe = {{
3959 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
3960 .pid_filter_count = 32,
3961 .pid_filter = stk70x0p_pid_filter,
3962 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
3963 .frontend_attach = novatd_frontend_attach,
3964 .tuner_attach = dib7070p_tuner_attach,
3965
3966 DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
3967 }},
3968 .size_of_priv = sizeof(struct dib0700_adapter_state),
3969 }
3970 },
3971
3972 .num_device_descs = 1,
3973 .devices = {
3974 { "Hauppauge Nova-TD Stick (52009)",
3975 { &dib0700_usb_id_table[35], NULL },
3976 { NULL },
3977 },
3978 },
3979
3980 .rc.core = {
3981 .rc_interval = DEFAULT_RC_INTERVAL,
3982 .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
3983 .module_name = "dib0700",
3984 .rc_query = dib0700_rc_query_old_firmware,
3985 .allowed_protos = RC_TYPE_RC5 |
3986 RC_TYPE_RC6 |
3987 RC_TYPE_NEC,
3988 .change_protocol = dib0700_change_protocol,
3989 },
3990 }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
3991
3992 .num_adapters = 2,
3993 .adapter = {
3994 {
3995 .num_frontends = 1,
3996 .fe = {{
3997 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
3998 .pid_filter_count = 32,
3999 .pid_filter = stk70x0p_pid_filter,
4000 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
3873 .frontend_attach = stk7070pd_frontend_attach0, 4001 .frontend_attach = stk7070pd_frontend_attach0,
3874 .tuner_attach = dib7070p_tuner_attach, 4002 .tuner_attach = dib7070p_tuner_attach,
3875 4003
@@ -3892,7 +4020,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
3892 } 4020 }
3893 }, 4021 },
3894 4022
3895 .num_device_descs = 6, 4023 .num_device_descs = 5,
3896 .devices = { 4024 .devices = {
3897 { "DiBcom STK7070PD reference design", 4025 { "DiBcom STK7070PD reference design",
3898 { &dib0700_usb_id_table[17], NULL }, 4026 { &dib0700_usb_id_table[17], NULL },
@@ -3902,10 +4030,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
3902 { &dib0700_usb_id_table[18], NULL }, 4030 { &dib0700_usb_id_table[18], NULL },
3903 { NULL }, 4031 { NULL },
3904 }, 4032 },
3905 { "Hauppauge Nova-TD Stick (52009)",
3906 { &dib0700_usb_id_table[35], NULL },
3907 { NULL },
3908 },
3909 { "Hauppauge Nova-TD-500 (84xxx)", 4033 { "Hauppauge Nova-TD-500 (84xxx)",
3910 { &dib0700_usb_id_table[36], NULL }, 4034 { &dib0700_usb_id_table[36], NULL },
3911 { NULL }, 4035 { NULL },
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 93e1b12e790..caae7f79c83 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -309,9 +309,14 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
309 309
310static int cxd2820r_get_frontend(struct dvb_frontend *fe) 310static int cxd2820r_get_frontend(struct dvb_frontend *fe)
311{ 311{
312 struct cxd2820r_priv *priv = fe->demodulator_priv;
312 int ret; 313 int ret;
313 314
314 dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system); 315 dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
316
317 if (priv->delivery_system == SYS_UNDEFINED)
318 return 0;
319
315 switch (fe->dtv_property_cache.delivery_system) { 320 switch (fe->dtv_property_cache.delivery_system) {
316 case SYS_DVBT: 321 case SYS_DVBT:
317 ret = cxd2820r_get_frontend_t(fe); 322 ret = cxd2820r_get_frontend_t(fe);
@@ -476,10 +481,10 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
476 dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system); 481 dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
477 482
478 /* switch between DVB-T and DVB-T2 when tune fails */ 483 /* switch between DVB-T and DVB-T2 when tune fails */
479 if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) { 484 if (priv->last_tune_failed) {
480 if (priv->delivery_system == SYS_DVBT) 485 if (priv->delivery_system == SYS_DVBT)
481 c->delivery_system = SYS_DVBT2; 486 c->delivery_system = SYS_DVBT2;
482 else 487 else if (priv->delivery_system == SYS_DVBT2)
483 c->delivery_system = SYS_DVBT; 488 c->delivery_system = SYS_DVBT;
484 } 489 }
485 490
@@ -492,6 +497,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
492 /* frontend lock wait loop count */ 497 /* frontend lock wait loop count */
493 switch (priv->delivery_system) { 498 switch (priv->delivery_system) {
494 case SYS_DVBT: 499 case SYS_DVBT:
500 case SYS_DVBC_ANNEX_A:
495 i = 20; 501 i = 20;
496 break; 502 break;
497 case SYS_DVBT2: 503 case SYS_DVBT2:
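
The two cxd2820r hunks above adjust DVB-C handling in the search path: the retry toggle now flips only between DVB-T and DVB-T2 (the explicit else-if keeps DVB-C from being forced back to DVB-T), and DVB-C gets the same 20-iteration lock wait as DVB-T. The toggle as a pure function, with illustrative enum values:

    enum sys { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A };

    static enum sys next_attempt(enum sys last, int last_tune_failed)
    {
    	if (!last_tune_failed)
    		return last;
    	if (last == SYS_DVBT)
    		return SYS_DVBT2;
    	if (last == SYS_DVBT2)
    		return SYS_DVBT;
    	return last;		/* DVB-C: never flipped */
    }
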
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index 938777065de..af65d013db1 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -1195,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
1195 1195
1196 for (i = 0; i < 30 ; i++) { 1196 for (i = 0; i < 30 ; i++) {
1197 ds3000_read_status(fe, &status); 1197 ds3000_read_status(fe, &status);
1198 if (status && FE_HAS_LOCK) 1198 if (status & FE_HAS_LOCK)
1199 break; 1199 break;
1200 1200
1201 msleep(10); 1201 msleep(10);
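
The one-character ds3000 fix above is the classic flags pitfall: `status && FE_HAS_LOCK` is true for any non-zero status, while `status & FE_HAS_LOCK` tests the single lock bit. A standalone demonstration using the standard fe_status_t bit values:

    #include <stdio.h>

    #define FE_HAS_SIGNAL 0x01
    #define FE_HAS_LOCK   0x10

    int main(void)
    {
    	unsigned int status = FE_HAS_SIGNAL;	/* signal, but no lock */

    	printf("&& claims lock: %d\n", status && FE_HAS_LOCK);	  /* 1: wrong */
    	printf("&  claims lock: %d\n", !!(status & FE_HAS_LOCK)); /* 0: right */
    	return 0;
    }
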
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index 7fa3e472cdc..fade566927c 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -402,7 +402,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
402 [2] = 0x8e, /* Layer C */ 402 [2] = 0x8e, /* Layer C */
403 }; 403 };
404 404
405 if (layer > ARRAY_SIZE(reg)) 405 if (layer >= ARRAY_SIZE(reg))
406 return -EINVAL; 406 return -EINVAL;
407 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); 407 rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
408 if (rc < 0) 408 if (rc < 0)
@@ -435,7 +435,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
435 [2] = 0x8f, /* Layer C */ 435 [2] = 0x8f, /* Layer C */
436 }; 436 };
437 437
438 if (layer > ARRAY_SIZE(reg)) 438 if (layer >= ARRAY_SIZE(reg))
439 return -EINVAL; 439 return -EINVAL;
440 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); 440 rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
441 if (rc < 0) 441 if (rc < 0)
@@ -470,7 +470,7 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
470 [2] = 0x90, /* Layer C */ 470 [2] = 0x90, /* Layer C */
471 }; 471 };
472 472
473 if (layer > ARRAY_SIZE(reg)) 473 if (layer >= ARRAY_SIZE(reg))
474 return -EINVAL; 474 return -EINVAL;
475 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); 475 rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
476 if (rc < 0) 476 if (rc < 0)
@@ -494,7 +494,7 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
494 [2] = 0x91, /* Layer C */ 494 [2] = 0x91, /* Layer C */
495 }; 495 };
496 496
497 if (layer > ARRAY_SIZE(reg)) 497 if (layer >= ARRAY_SIZE(reg))
498 return -EINVAL; 498 return -EINVAL;
499 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); 499 rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
500 if (rc < 0) 500 if (rc < 0)
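
All four mb86a20s hunks above fix the same off-by-one: for an N-entry array the valid indexes are 0..N-1, so the reject test must be `layer >= ARRAY_SIZE(reg)`; the old `layer > ARRAY_SIZE(reg)` let the out-of-bounds index N through. The corrected guard in isolation:

    #include <stddef.h>

    static int select_layer_reg(const unsigned char *reg, size_t n,
    			    size_t layer, unsigned char *out)
    {
    	if (layer >= n)		/* index n itself is already out of range */
    		return -1;	/* -EINVAL in the driver */
    	*out = reg[layer];
    	return 0;
    }
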
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
index 86da3d81649..ad7c72e8f51 100644
--- a/drivers/media/dvb/frontends/tda18271c2dd.c
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -29,7 +29,6 @@
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/firmware.h> 30#include <linux/firmware.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/version.h>
33#include <asm/div64.h> 32#include <asm/div64.h>
34 33
35#include "dvb_frontend.h" 34#include "dvb_frontend.h"
diff --git a/drivers/media/video/as3645a.c b/drivers/media/video/as3645a.c
index ec859a58065..f241702a0f3 100644
--- a/drivers/media/video/as3645a.c
+++ b/drivers/media/video/as3645a.c
@@ -29,6 +29,7 @@
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/slab.h>
32 33
33#include <media/as3645a.h> 34#include <media/as3645a.h>
34#include <media/v4l2-ctrls.h> 35#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 14cb961c22b..4bfd865a410 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -751,20 +751,10 @@ int cx18_v4l2_close(struct file *filp)
751 751
752 CX18_DEBUG_IOCTL("close() of %s\n", s->name); 752 CX18_DEBUG_IOCTL("close() of %s\n", s->name);
753 753
754 v4l2_fh_del(fh);
755 v4l2_fh_exit(fh);
756
757 /* Easy case first: this stream was never claimed by us */
758 if (s->id != id->open_id) {
759 kfree(id);
760 return 0;
761 }
762
763 /* 'Unclaim' this stream */
764
765 /* Stop radio */
766 mutex_lock(&cx->serialize_lock); 754 mutex_lock(&cx->serialize_lock);
767 if (id->type == CX18_ENC_STREAM_TYPE_RAD) { 755 /* Stop radio */
756 if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
757 v4l2_fh_is_singular_file(filp)) {
768 /* Closing radio device, return to TV mode */ 758 /* Closing radio device, return to TV mode */
769 cx18_mute(cx); 759 cx18_mute(cx);
770 /* Mark that the radio is no longer in use */ 760 /* Mark that the radio is no longer in use */
@@ -781,10 +771,14 @@ int cx18_v4l2_close(struct file *filp)
781 } 771 }
782 /* Done! Unmute and continue. */ 772 /* Done! Unmute and continue. */
783 cx18_unmute(cx); 773 cx18_unmute(cx);
784 cx18_release_stream(s);
785 } else {
786 cx18_stop_capture(id, 0);
787 } 774 }
775
776 v4l2_fh_del(fh);
777 v4l2_fh_exit(fh);
778
779 /* 'Unclaim' this stream */
780 if (s->id == id->open_id)
781 cx18_stop_capture(id, 0);
788 kfree(id); 782 kfree(id);
789 mutex_unlock(&cx->serialize_lock); 783 mutex_unlock(&cx->serialize_lock);
790 return 0; 784 return 0;
@@ -810,21 +804,15 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
810 804
811 item->open_id = cx->open_id++; 805 item->open_id = cx->open_id++;
812 filp->private_data = &item->fh; 806 filp->private_data = &item->fh;
807 v4l2_fh_add(&item->fh);
813 808
814 if (item->type == CX18_ENC_STREAM_TYPE_RAD) { 809 if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
815 /* Try to claim this stream */ 810 v4l2_fh_is_singular_file(filp)) {
816 if (cx18_claim_stream(item, item->type)) {
817 /* No, it's already in use */
818 v4l2_fh_exit(&item->fh);
819 kfree(item);
820 return -EBUSY;
821 }
822
823 if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) { 811 if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
824 if (atomic_read(&cx->ana_capturing) > 0) { 812 if (atomic_read(&cx->ana_capturing) > 0) {
825 /* switching to radio while capture is 813 /* switching to radio while capture is
826 in progress is not polite */ 814 in progress is not polite */
827 cx18_release_stream(s); 815 v4l2_fh_del(&item->fh);
828 v4l2_fh_exit(&item->fh); 816 v4l2_fh_exit(&item->fh);
829 kfree(item); 817 kfree(item);
830 return -EBUSY; 818 return -EBUSY;
@@ -842,7 +830,6 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
842 /* Done! Unmute and continue. */ 830 /* Done! Unmute and continue. */
843 cx18_unmute(cx); 831 cx18_unmute(cx);
844 } 832 }
845 v4l2_fh_add(&item->fh);
846 return 0; 833 return 0;
847} 834}
848 835
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 919ed77b32f..875a7ce9473 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1052,7 +1052,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1052 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1052 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1053 if (dev == NULL) { 1053 if (dev == NULL) {
1054 cx231xx_err(DRIVER_NAME ": out of memory!\n"); 1054 cx231xx_err(DRIVER_NAME ": out of memory!\n");
1055 clear_bit(dev->devno, &cx231xx_devused); 1055 clear_bit(nr, &cx231xx_devused);
1056 return -ENOMEM; 1056 return -ENOMEM;
1057 } 1057 }
1058 1058
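
The cx231xx fix above removes an error-path NULL dereference: once kzalloc() has failed, `dev` is NULL, so `dev->devno` cannot be read; the device slot has to be released via the already-known index `nr` instead. A reduced sketch of the two paths:

    struct dev { int devno; };

    static int probe_slot(unsigned long *used, int nr)
    {
    	struct dev *dev = NULL;		/* as if kzalloc() failed */

    	if (dev == NULL) {
    		/* wrong: *used &= ~(1UL << dev->devno);  NULL deref */
    		*used &= ~(1UL << nr);	/* right: use the known index */
    		return -12;		/* -ENOMEM */
    	}
    	return 0;
    }
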
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 3c01be999e3..19b5499d262 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -213,8 +213,8 @@ struct cx23885_board cx23885_boards[] = {
213 .portc = CX23885_MPEG_DVB, 213 .portc = CX23885_MPEG_DVB,
214 .tuner_type = TUNER_XC4000, 214 .tuner_type = TUNER_XC4000,
215 .tuner_addr = 0x61, 215 .tuner_addr = 0x61,
216 .radio_type = TUNER_XC4000, 216 .radio_type = UNSET,
217 .radio_addr = 0x61, 217 .radio_addr = ADDR_UNSET,
218 .input = {{ 218 .input = {{
219 .type = CX23885_VMUX_TELEVISION, 219 .type = CX23885_VMUX_TELEVISION,
220 .vmux = CX25840_VIN2_CH1 | 220 .vmux = CX25840_VIN2_CH1 |
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index af8a225763d..6835eb1fc09 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -943,6 +943,11 @@ static int dvb_register(struct cx23885_tsport *port)
943 943
944 fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, 944 fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
945 &dev->i2c_bus[1].i2c_adap, &cfg); 945 &dev->i2c_bus[1].i2c_adap, &cfg);
946 if (!fe) {
947 printk(KERN_ERR "%s/2: xc4000 attach failed\n",
948 dev->name);
949 goto frontend_detach;
950 }
946 } 951 }
947 break; 952 break;
948 case CX23885_BOARD_TBS_6920: 953 case CX23885_BOARD_TBS_6920:
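
The added check above closes a hole where a failed xc4000 attach was ignored: dvb_attach() returns NULL on failure, and continuing with a missing tuner leaves a half-built frontend, so the error path now tears it down. The shape of the fix, with stubs for the driver calls:

    struct fe;

    static struct fe *attach_tuner(void) { return 0; }	/* stub: may fail */
    static void frontend_detach(void) { }			/* stub */

    static int register_frontend(void)
    {
    	struct fe *fe = attach_tuner();

    	if (!fe) {
    		frontend_detach();	/* undo partial setup */
    		return -19;		/* -ENODEV */
    	}
    	return 0;
    }
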
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 4bbf9bb97bd..c654bdc7ccb 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1550,7 +1550,6 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
1550 struct v4l2_control ctrl; 1550 struct v4l2_control ctrl;
1551 struct videobuf_dvb_frontend *vfe; 1551 struct videobuf_dvb_frontend *vfe;
1552 struct dvb_frontend *fe; 1552 struct dvb_frontend *fe;
1553 int err = 0;
1554 1553
1555 struct analog_parameters params = { 1554 struct analog_parameters params = {
1556 .mode = V4L2_TUNER_ANALOG_TV, 1555 .mode = V4L2_TUNER_ANALOG_TV,
@@ -1572,8 +1571,10 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
1572 params.frequency, f->tuner, params.std); 1571 params.frequency, f->tuner, params.std);
1573 1572
1574 vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1); 1573 vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
1575 if (!vfe) 1574 if (!vfe) {
1576 err = -EINVAL; 1575 mutex_unlock(&dev->lock);
1576 return -EINVAL;
1577 }
1577 1578
1578 fe = vfe->dvb.frontend; 1579 fe = vfe->dvb.frontend;
1579 1580
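
The cx23885-video change above replaces a soft error flag with a hard early return, and makes sure that return drops dev->lock first; any exit taken while the mutex is held would deadlock the next caller. A user-space analogue of the pattern with pthreads:

    #include <pthread.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

    static int set_freq(int have_frontend)
    {
    	pthread_mutex_lock(&dev_lock);

    	if (!have_frontend) {
    		pthread_mutex_unlock(&dev_lock);	/* mandatory before return */
    		return -22;				/* -EINVAL */
    	}

    	/* ...program the tuner... */
    	pthread_mutex_unlock(&dev_lock);
    	return 0;
    }
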
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 62c7ad050f9..cbd5d119a2c 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
1573 .name = "Pinnacle Hybrid PCTV", 1573 .name = "Pinnacle Hybrid PCTV",
1574 .tuner_type = TUNER_XC2028, 1574 .tuner_type = TUNER_XC2028,
1575 .tuner_addr = 0x61, 1575 .tuner_addr = 0x61,
1576 .radio_type = TUNER_XC2028, 1576 .radio_type = UNSET,
1577 .radio_addr = 0x61, 1577 .radio_addr = ADDR_UNSET,
1578 .input = { { 1578 .input = { {
1579 .type = CX88_VMUX_TELEVISION, 1579 .type = CX88_VMUX_TELEVISION,
1580 .vmux = 0, 1580 .vmux = 0,
@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
1611 .name = "Leadtek TV2000 XP Global", 1611 .name = "Leadtek TV2000 XP Global",
1612 .tuner_type = TUNER_XC2028, 1612 .tuner_type = TUNER_XC2028,
1613 .tuner_addr = 0x61, 1613 .tuner_addr = 0x61,
1614 .radio_type = TUNER_XC2028, 1614 .radio_type = UNSET,
1615 .radio_addr = 0x61, 1615 .radio_addr = ADDR_UNSET,
1616 .input = { { 1616 .input = { {
1617 .type = CX88_VMUX_TELEVISION, 1617 .type = CX88_VMUX_TELEVISION,
1618 .vmux = 0, 1618 .vmux = 0,
@@ -2115,8 +2115,8 @@ static const struct cx88_board cx88_boards[] = {
2115 .name = "Terratec Cinergy HT PCI MKII", 2115 .name = "Terratec Cinergy HT PCI MKII",
2116 .tuner_type = TUNER_XC2028, 2116 .tuner_type = TUNER_XC2028,
2117 .tuner_addr = 0x61, 2117 .tuner_addr = 0x61,
2118 .radio_type = TUNER_XC2028, 2118 .radio_type = UNSET,
2119 .radio_addr = 0x61, 2119 .radio_addr = ADDR_UNSET,
2120 .input = { { 2120 .input = { {
2121 .type = CX88_VMUX_TELEVISION, 2121 .type = CX88_VMUX_TELEVISION,
2122 .vmux = 0, 2122 .vmux = 0,
@@ -2154,9 +2154,9 @@ static const struct cx88_board cx88_boards[] = {
2154 [CX88_BOARD_WINFAST_DTV1800H] = { 2154 [CX88_BOARD_WINFAST_DTV1800H] = {
2155 .name = "Leadtek WinFast DTV1800 Hybrid", 2155 .name = "Leadtek WinFast DTV1800 Hybrid",
2156 .tuner_type = TUNER_XC2028, 2156 .tuner_type = TUNER_XC2028,
2157 .radio_type = TUNER_XC2028, 2157 .radio_type = UNSET,
2158 .tuner_addr = 0x61, 2158 .tuner_addr = 0x61,
2159 .radio_addr = 0x61, 2159 .radio_addr = ADDR_UNSET,
2160 /* 2160 /*
2161 * GPIO setting 2161 * GPIO setting
2162 * 2162 *
@@ -2195,9 +2195,9 @@ static const struct cx88_board cx88_boards[] = {
2195 [CX88_BOARD_WINFAST_DTV1800H_XC4000] = { 2195 [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
2196 .name = "Leadtek WinFast DTV1800 H (XC4000)", 2196 .name = "Leadtek WinFast DTV1800 H (XC4000)",
2197 .tuner_type = TUNER_XC4000, 2197 .tuner_type = TUNER_XC4000,
2198 .radio_type = TUNER_XC4000, 2198 .radio_type = UNSET,
2199 .tuner_addr = 0x61, 2199 .tuner_addr = 0x61,
2200 .radio_addr = 0x61, 2200 .radio_addr = ADDR_UNSET,
2201 /* 2201 /*
2202 * GPIO setting 2202 * GPIO setting
2203 * 2203 *
@@ -2236,9 +2236,9 @@ static const struct cx88_board cx88_boards[] = {
2236 [CX88_BOARD_WINFAST_DTV2000H_PLUS] = { 2236 [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
2237 .name = "Leadtek WinFast DTV2000 H PLUS", 2237 .name = "Leadtek WinFast DTV2000 H PLUS",
2238 .tuner_type = TUNER_XC4000, 2238 .tuner_type = TUNER_XC4000,
2239 .radio_type = TUNER_XC4000, 2239 .radio_type = UNSET,
2240 .tuner_addr = 0x61, 2240 .tuner_addr = 0x61,
2241 .radio_addr = 0x61, 2241 .radio_addr = ADDR_UNSET,
2242 /* 2242 /*
2243 * GPIO 2243 * GPIO
2244 * 2: 1: mute audio 2244 * 2: 1: mute audio
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 544af91cbdc..3949b7dc236 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -731,9 +731,6 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
731 731
732 init_kthread_work(&itv->irq_work, ivtv_irq_work_handler); 732 init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
733 733
734 /* start counting open_id at 1 */
735 itv->open_id = 1;
736
737 /* Initial settings */ 734 /* Initial settings */
738 itv->cxhdl.port = CX2341X_PORT_MEMORY; 735 itv->cxhdl.port = CX2341X_PORT_MEMORY;
739 itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI; 736 itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 8f9cc17b518..06f3d78389b 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -332,7 +332,7 @@ struct ivtv_stream {
332 const char *name; /* name of the stream */ 332 const char *name; /* name of the stream */
333 int type; /* stream type */ 333 int type; /* stream type */
334 334
335 u32 id; 335 struct v4l2_fh *fh; /* pointer to the streaming filehandle */
336 spinlock_t qlock; /* locks access to the queues */ 336 spinlock_t qlock; /* locks access to the queues */
337 unsigned long s_flags; /* status flags, see above */ 337 unsigned long s_flags; /* status flags, see above */
338 int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */ 338 int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -379,7 +379,6 @@ struct ivtv_stream {
379 379
380struct ivtv_open_id { 380struct ivtv_open_id {
381 struct v4l2_fh fh; 381 struct v4l2_fh fh;
382 u32 open_id; /* unique ID for this file descriptor */
383 int type; /* stream type */ 382 int type; /* stream type */
384 int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */ 383 int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */
385 struct ivtv *itv; 384 struct ivtv *itv;
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 38f052257f4..2cd6c89b7d9 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -50,16 +50,16 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
50 50
51 if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { 51 if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
52 /* someone already claimed this stream */ 52 /* someone already claimed this stream */
53 if (s->id == id->open_id) { 53 if (s->fh == &id->fh) {
54 /* yes, this file descriptor did. So that's OK. */ 54 /* yes, this file descriptor did. So that's OK. */
55 return 0; 55 return 0;
56 } 56 }
57 if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI || 57 if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
58 type == IVTV_ENC_STREAM_TYPE_VBI)) { 58 type == IVTV_ENC_STREAM_TYPE_VBI)) {
59 /* VBI is handled already internally, now also assign 59 /* VBI is handled already internally, now also assign
60 the file descriptor to this stream for external 60 the file descriptor to this stream for external
61 reading of the stream. */ 61 reading of the stream. */
62 s->id = id->open_id; 62 s->fh = &id->fh;
63 IVTV_DEBUG_INFO("Start Read VBI\n"); 63 IVTV_DEBUG_INFO("Start Read VBI\n");
64 return 0; 64 return 0;
65 } 65 }
@@ -67,7 +67,7 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
67 IVTV_DEBUG_INFO("Stream %d is busy\n", type); 67 IVTV_DEBUG_INFO("Stream %d is busy\n", type);
68 return -EBUSY; 68 return -EBUSY;
69 } 69 }
70 s->id = id->open_id; 70 s->fh = &id->fh;
71 if (type == IVTV_DEC_STREAM_TYPE_VBI) { 71 if (type == IVTV_DEC_STREAM_TYPE_VBI) {
72 /* Enable reinsertion interrupt */ 72 /* Enable reinsertion interrupt */
73 ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT); 73 ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -104,7 +104,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
104 struct ivtv *itv = s->itv; 104 struct ivtv *itv = s->itv;
105 struct ivtv_stream *s_vbi; 105 struct ivtv_stream *s_vbi;
106 106
107 s->id = -1; 107 s->fh = NULL;
108 if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) && 108 if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
109 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { 109 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
110 /* this stream is still in use internally */ 110 /* this stream is still in use internally */
@@ -136,7 +136,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
136 /* was already cleared */ 136 /* was already cleared */
137 return; 137 return;
138 } 138 }
139 if (s_vbi->id != -1) { 139 if (s_vbi->fh) {
140 /* VBI stream still claimed by a file descriptor */ 140 /* VBI stream still claimed by a file descriptor */
141 return; 141 return;
142 } 142 }
@@ -268,11 +268,13 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
268 } 268 }
269 269
270 /* wait for more data to arrive */ 270 /* wait for more data to arrive */
271 mutex_unlock(&itv->serialize_lock);
271 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); 272 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
272 /* New buffers might have become available before we were added to the waitqueue */ 273 /* New buffers might have become available before we were added to the waitqueue */
273 if (!s->q_full.buffers) 274 if (!s->q_full.buffers)
274 schedule(); 275 schedule();
275 finish_wait(&s->waitq, &wait); 276 finish_wait(&s->waitq, &wait);
277 mutex_lock(&itv->serialize_lock);
276 if (signal_pending(current)) { 278 if (signal_pending(current)) {
277 /* return if a signal was received */ 279 /* return if a signal was received */
278 IVTV_DEBUG_INFO("User stopped %s\n", s->name); 280 IVTV_DEBUG_INFO("User stopped %s\n", s->name);
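
This hunk is the pattern threaded through the rest of the ivtv changes: once the driver hands itv->serialize_lock to the v4l2 core (see the ivtv-streams.c hunk below), every file operation runs under that mutex, so any path that sleeps must drop it first and retake it afterwards, or every other file handle stalls behind the sleeper. A runnable user-space analogue of drop-wait-retake, using a condition variable in place of the kernel waitqueue:

    #include <pthread.h>

    static pthread_mutex_t serialize = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t qlock     = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  avail     = PTHREAD_COND_INITIALIZER;
    static int nbuffers;

    /* Called with `serialize` held; returns with it held again. */
    static void wait_for_buffer(void)
    {
    	pthread_mutex_unlock(&serialize);	/* never sleep holding it */

    	pthread_mutex_lock(&qlock);
    	while (nbuffers == 0)
    		pthread_cond_wait(&avail, &qlock);	/* the schedule() step */
    	pthread_mutex_unlock(&qlock);

    	pthread_mutex_lock(&serialize);		/* retake before returning */
    }
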
@@ -357,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
357 size_t tot_written = 0; 359 size_t tot_written = 0;
358 int single_frame = 0; 360 int single_frame = 0;
359 361
360 if (atomic_read(&itv->capturing) == 0 && s->id == -1) { 362 if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
361 /* shouldn't happen */ 363 /* shouldn't happen */
362 IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name); 364 IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
363 return -EIO; 365 return -EIO;
@@ -507,9 +509,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_
507 509
508 IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name); 510 IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
509 511
510 mutex_lock(&itv->serialize_lock);
511 rc = ivtv_start_capture(id); 512 rc = ivtv_start_capture(id);
512 mutex_unlock(&itv->serialize_lock);
513 if (rc) 513 if (rc)
514 return rc; 514 return rc;
515 return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK); 515 return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
@@ -584,9 +584,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
584 set_bit(IVTV_F_S_APPL_IO, &s->s_flags); 584 set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
585 585
586 /* Start decoder (returns 0 if already started) */ 586 /* Start decoder (returns 0 if already started) */
587 mutex_lock(&itv->serialize_lock);
588 rc = ivtv_start_decoding(id, itv->speed); 587 rc = ivtv_start_decoding(id, itv->speed);
589 mutex_unlock(&itv->serialize_lock);
590 if (rc) { 588 if (rc) {
591 IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name); 589 IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
592 590
@@ -627,11 +625,13 @@ retry:
627 break; 625 break;
628 if (filp->f_flags & O_NONBLOCK) 626 if (filp->f_flags & O_NONBLOCK)
629 return -EAGAIN; 627 return -EAGAIN;
628 mutex_unlock(&itv->serialize_lock);
630 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); 629 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
631 /* New buffers might have become free before we were added to the waitqueue */ 630 /* New buffers might have become free before we were added to the waitqueue */
632 if (!s->q_free.buffers) 631 if (!s->q_free.buffers)
633 schedule(); 632 schedule();
634 finish_wait(&s->waitq, &wait); 633 finish_wait(&s->waitq, &wait);
634 mutex_lock(&itv->serialize_lock);
635 if (signal_pending(current)) { 635 if (signal_pending(current)) {
636 IVTV_DEBUG_INFO("User stopped %s\n", s->name); 636 IVTV_DEBUG_INFO("User stopped %s\n", s->name);
637 return -EINTR; 637 return -EINTR;
@@ -686,12 +686,14 @@ retry:
686 if (mode == OUT_YUV) 686 if (mode == OUT_YUV)
687 ivtv_yuv_setup_stream_frame(itv); 687 ivtv_yuv_setup_stream_frame(itv);
688 688
689 mutex_unlock(&itv->serialize_lock);
689 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); 690 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
690 while (!(got_sig = signal_pending(current)) && 691 while (!(got_sig = signal_pending(current)) &&
691 test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) { 692 test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
692 schedule(); 693 schedule();
693 } 694 }
694 finish_wait(&itv->dma_waitq, &wait); 695 finish_wait(&itv->dma_waitq, &wait);
696 mutex_lock(&itv->serialize_lock);
695 if (got_sig) { 697 if (got_sig) {
696 IVTV_DEBUG_INFO("User interrupted %s\n", s->name); 698 IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
697 return -EINTR; 699 return -EINTR;
@@ -756,9 +758,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
756 if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { 758 if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
757 int rc; 759 int rc;
758 760
759 mutex_lock(&itv->serialize_lock);
760 rc = ivtv_start_capture(id); 761 rc = ivtv_start_capture(id);
761 mutex_unlock(&itv->serialize_lock);
762 if (rc) { 762 if (rc) {
763 IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n", 763 IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
764 s->name, rc); 764 s->name, rc);
@@ -808,7 +808,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
808 id->type == IVTV_ENC_STREAM_TYPE_VBI) && 808 id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
809 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { 809 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
810 /* Also used internally, don't stop capturing */ 810 /* Also used internally, don't stop capturing */
811 s->id = -1; 811 s->fh = NULL;
812 } 812 }
813 else { 813 else {
814 ivtv_stop_v4l2_encode_stream(s, gop_end); 814 ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -861,20 +861,9 @@ int ivtv_v4l2_close(struct file *filp)
861 861
862 IVTV_DEBUG_FILE("close %s\n", s->name); 862 IVTV_DEBUG_FILE("close %s\n", s->name);
863 863
864 v4l2_fh_del(fh);
865 v4l2_fh_exit(fh);
866
867 /* Easy case first: this stream was never claimed by us */
868 if (s->id != id->open_id) {
869 kfree(id);
870 return 0;
871 }
872
873 /* 'Unclaim' this stream */
874
875 /* Stop radio */ 864 /* Stop radio */
876 mutex_lock(&itv->serialize_lock); 865 if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
877 if (id->type == IVTV_ENC_STREAM_TYPE_RAD) { 866 v4l2_fh_is_singular_file(filp)) {
878 /* Closing radio device, return to TV mode */ 867 /* Closing radio device, return to TV mode */
879 ivtv_mute(itv); 868 ivtv_mute(itv);
880 /* Mark that the radio is no longer in use */ 869 /* Mark that the radio is no longer in use */
@@ -890,13 +879,25 @@ int ivtv_v4l2_close(struct file *filp)
890 if (atomic_read(&itv->capturing) > 0) { 879 if (atomic_read(&itv->capturing) > 0) {
891 /* Undo video mute */ 880 /* Undo video mute */
892 ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1, 881 ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
893 v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) | 882 v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
894 (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8)); 883 (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
895 } 884 }
896 /* Done! Unmute and continue. */ 885 /* Done! Unmute and continue. */
897 ivtv_unmute(itv); 886 ivtv_unmute(itv);
898 ivtv_release_stream(s); 887 }
899 } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { 888
889 v4l2_fh_del(fh);
890 v4l2_fh_exit(fh);
891
892 /* Easy case first: this stream was never claimed by us */
893 if (s->fh != &id->fh) {
894 kfree(id);
895 return 0;
896 }
897
898 /* 'Unclaim' this stream */
899
900 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
900 struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT]; 901 struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
901 902
902 ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0); 903 ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
@@ -911,21 +912,25 @@ int ivtv_v4l2_close(struct file *filp)
911 ivtv_stop_capture(id, 0); 912 ivtv_stop_capture(id, 0);
912 } 913 }
913 kfree(id); 914 kfree(id);
914 mutex_unlock(&itv->serialize_lock);
915 return 0; 915 return 0;
916} 916}
917 917
918static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp) 918int ivtv_v4l2_open(struct file *filp)
919{ 919{
920#ifdef CONFIG_VIDEO_ADV_DEBUG
921 struct video_device *vdev = video_devdata(filp); 920 struct video_device *vdev = video_devdata(filp);
922#endif 921 struct ivtv_stream *s = video_get_drvdata(vdev);
923 struct ivtv *itv = s->itv; 922 struct ivtv *itv = s->itv;
924 struct ivtv_open_id *item; 923 struct ivtv_open_id *item;
925 int res = 0; 924 int res = 0;
926 925
927 IVTV_DEBUG_FILE("open %s\n", s->name); 926 IVTV_DEBUG_FILE("open %s\n", s->name);
928 927
928 if (ivtv_init_on_first_open(itv)) {
929 IVTV_ERR("Failed to initialize on device %s\n",
930 video_device_node_name(vdev));
931 return -ENXIO;
932 }
933
929#ifdef CONFIG_VIDEO_ADV_DEBUG 934#ifdef CONFIG_VIDEO_ADV_DEBUG
930 /* Unless ivtv_fw_debug is set, error out if firmware dead. */ 935 /* Unless ivtv_fw_debug is set, error out if firmware dead. */
931 if (ivtv_fw_debug) { 936 if (ivtv_fw_debug) {
@@ -966,31 +971,19 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
966 return -ENOMEM; 971 return -ENOMEM;
967 } 972 }
968 v4l2_fh_init(&item->fh, s->vdev); 973 v4l2_fh_init(&item->fh, s->vdev);
969 if (res < 0) {
970 v4l2_fh_exit(&item->fh);
971 kfree(item);
972 return res;
973 }
974 item->itv = itv; 974 item->itv = itv;
975 item->type = s->type; 975 item->type = s->type;
976 976
977 item->open_id = itv->open_id++;
978 filp->private_data = &item->fh; 977 filp->private_data = &item->fh;
978 v4l2_fh_add(&item->fh);
979 979
980 if (item->type == IVTV_ENC_STREAM_TYPE_RAD) { 980 if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
981 /* Try to claim this stream */ 981 v4l2_fh_is_singular_file(filp)) {
982 if (ivtv_claim_stream(item, item->type)) {
983 /* No, it's already in use */
984 v4l2_fh_exit(&item->fh);
985 kfree(item);
986 return -EBUSY;
987 }
988
989 if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) { 982 if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
990 if (atomic_read(&itv->capturing) > 0) { 983 if (atomic_read(&itv->capturing) > 0) {
991 /* switching to radio while capture is 984 /* switching to radio while capture is
992 in progress is not polite */ 985 in progress is not polite */
993 ivtv_release_stream(s); 986 v4l2_fh_del(&item->fh);
994 v4l2_fh_exit(&item->fh); 987 v4l2_fh_exit(&item->fh);
995 kfree(item); 988 kfree(item);
996 return -EBUSY; 989 return -EBUSY;
@@ -1022,32 +1015,9 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
1022 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31); 1015 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
1023 itv->yuv_info.stream_size = 0; 1016 itv->yuv_info.stream_size = 0;
1024 } 1017 }
1025 v4l2_fh_add(&item->fh);
1026 return 0; 1018 return 0;
1027} 1019}
1028 1020
1029int ivtv_v4l2_open(struct file *filp)
1030{
1031 int res;
1032 struct ivtv *itv = NULL;
1033 struct ivtv_stream *s = NULL;
1034 struct video_device *vdev = video_devdata(filp);
1035
1036 s = video_get_drvdata(vdev);
1037 itv = s->itv;
1038
1039 mutex_lock(&itv->serialize_lock);
1040 if (ivtv_init_on_first_open(itv)) {
1041 IVTV_ERR("Failed to initialize on device %s\n",
1042 video_device_node_name(vdev));
1043 mutex_unlock(&itv->serialize_lock);
1044 return -ENXIO;
1045 }
1046 res = ivtv_serialized_open(s, filp);
1047 mutex_unlock(&itv->serialize_lock);
1048 return res;
1049}
1050
1051void ivtv_mute(struct ivtv *itv) 1021void ivtv_mute(struct ivtv *itv)
1052{ 1022{
1053 if (atomic_read(&itv->capturing)) 1023 if (atomic_read(&itv->capturing))
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index ecafa697326..c4bc4814309 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -179,6 +179,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
179 ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0); 179 ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
180 180
181 /* Wait for any DMA to finish */ 181 /* Wait for any DMA to finish */
182 mutex_unlock(&itv->serialize_lock);
182 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); 183 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
183 while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) { 184 while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
184 got_sig = signal_pending(current); 185 got_sig = signal_pending(current);
@@ -188,6 +189,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
188 schedule(); 189 schedule();
189 } 190 }
190 finish_wait(&itv->dma_waitq, &wait); 191 finish_wait(&itv->dma_waitq, &wait);
192 mutex_lock(&itv->serialize_lock);
191 if (got_sig) 193 if (got_sig)
192 return -EINTR; 194 return -EINTR;
193 195
@@ -1107,6 +1109,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
1107 * happens within the first 100 lines of the top field. 1109 * happens within the first 100 lines of the top field.
1108 * Make 4 attempts to sync to the decoder before giving up. 1110 * Make 4 attempts to sync to the decoder before giving up.
1109 */ 1111 */
1112 mutex_unlock(&itv->serialize_lock);
1110 for (f = 0; f < 4; f++) { 1113 for (f = 0; f < 4; f++) {
1111 prepare_to_wait(&itv->vsync_waitq, &wait, 1114 prepare_to_wait(&itv->vsync_waitq, &wait,
1112 TASK_UNINTERRUPTIBLE); 1115 TASK_UNINTERRUPTIBLE);
@@ -1115,6 +1118,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
1115 schedule_timeout(msecs_to_jiffies(25)); 1118 schedule_timeout(msecs_to_jiffies(25));
1116 } 1119 }
1117 finish_wait(&itv->vsync_waitq, &wait); 1120 finish_wait(&itv->vsync_waitq, &wait);
1121 mutex_lock(&itv->serialize_lock);
1118 1122
1119 if (f == 4) 1123 if (f == 4)
1120 IVTV_WARN("Mode change failed to sync to decoder\n"); 1124 IVTV_WARN("Mode change failed to sync to decoder\n");
@@ -1842,8 +1846,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
1842 return 0; 1846 return 0;
1843} 1847}
1844 1848
1845static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp, 1849long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1846 unsigned int cmd, unsigned long arg)
1847{ 1850{
1848 struct video_device *vfd = video_devdata(filp); 1851 struct video_device *vfd = video_devdata(filp);
1849 long ret; 1852 long ret;
@@ -1855,21 +1858,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
1855 return ret; 1858 return ret;
1856} 1859}
1857 1860
1858long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1859{
1860 struct ivtv_open_id *id = fh2id(filp->private_data);
1861 struct ivtv *itv = id->itv;
1862 long res;
1863
1864 /* DQEVENT can block, so this should not run with the serialize lock */
1865 if (cmd == VIDIOC_DQEVENT)
1866 return ivtv_serialized_ioctl(itv, filp, cmd, arg);
1867 mutex_lock(&itv->serialize_lock);
1868 res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
1869 mutex_unlock(&itv->serialize_lock);
1870 return res;
1871}
1872
1873static const struct v4l2_ioctl_ops ivtv_ioctl_ops = { 1861static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
1874 .vidioc_querycap = ivtv_querycap, 1862 .vidioc_querycap = ivtv_querycap,
1875 .vidioc_s_audio = ivtv_s_audio, 1863 .vidioc_s_audio = ivtv_s_audio,
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9c29e964d40..1b3b9578bf4 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -288,13 +288,13 @@ static void dma_post(struct ivtv_stream *s)
288 ivtv_process_vbi_data(itv, buf, 0, s->type); 288 ivtv_process_vbi_data(itv, buf, 0, s->type);
289 s->q_dma.bytesused += buf->bytesused; 289 s->q_dma.bytesused += buf->bytesused;
290 } 290 }
291 if (s->id == -1) { 291 if (s->fh == NULL) {
292 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); 292 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
293 return; 293 return;
294 } 294 }
295 } 295 }
296 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused); 296 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
297 if (s->id != -1) 297 if (s->fh)
298 wake_up(&s->waitq); 298 wake_up(&s->waitq);
299} 299}
300 300
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index e7794dc1330..c6e28b4ebbe 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -159,7 +159,6 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
159 s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size; 159 s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
160 spin_lock_init(&s->qlock); 160 spin_lock_init(&s->qlock);
161 init_waitqueue_head(&s->waitq); 161 init_waitqueue_head(&s->waitq);
162 s->id = -1;
163 s->sg_handle = IVTV_DMA_UNMAPPED; 162 s->sg_handle = IVTV_DMA_UNMAPPED;
164 ivtv_queue_init(&s->q_free); 163 ivtv_queue_init(&s->q_free);
165 ivtv_queue_init(&s->q_full); 164 ivtv_queue_init(&s->q_full);
@@ -214,6 +213,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
214 s->vdev->fops = ivtv_stream_info[type].fops; 213 s->vdev->fops = ivtv_stream_info[type].fops;
215 s->vdev->release = video_device_release; 214 s->vdev->release = video_device_release;
216 s->vdev->tvnorms = V4L2_STD_ALL; 215 s->vdev->tvnorms = V4L2_STD_ALL;
216 s->vdev->lock = &itv->serialize_lock;
217 set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags); 217 set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
218 ivtv_set_funcs(s->vdev); 218 ivtv_set_funcs(s->vdev);
219 return 0; 219 return 0;
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index dcbab6ad4c2..2ad65eb2983 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -1149,23 +1149,37 @@ int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
1149{ 1149{
1150 struct yuv_playback_info *yi = &itv->yuv_info; 1150 struct yuv_playback_info *yi = &itv->yuv_info;
1151 struct ivtv_dma_frame dma_args; 1151 struct ivtv_dma_frame dma_args;
1152 int res;
1152 1153
1153 ivtv_yuv_setup_stream_frame(itv); 1154 ivtv_yuv_setup_stream_frame(itv);
1154 1155
1155 /* We only need to supply source addresses for this */ 1156 /* We only need to supply source addresses for this */
1156 dma_args.y_source = src; 1157 dma_args.y_source = src;
1157 dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31); 1158 dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
1158 return ivtv_yuv_udma_frame(itv, &dma_args); 1159 /* Wait for frame DMA. serialize_lock is held here, so
1160 drop it while we wait and retake it afterwards to let
1161 other processes access the driver in the meantime. */
1162 mutex_unlock(&itv->serialize_lock);
1163 res = ivtv_yuv_udma_frame(itv, &dma_args);
1164 mutex_lock(&itv->serialize_lock);
1165 return res;
1159} 1166}
1160 1167
1161/* IVTV_IOC_DMA_FRAME ioctl handler */ 1168/* IVTV_IOC_DMA_FRAME ioctl handler */
1162int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args) 1169int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
1163{ 1170{
1164/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */ 1171 int res;
1165 1172
1173/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
1166 ivtv_yuv_next_free(itv); 1174 ivtv_yuv_next_free(itv);
1167 ivtv_yuv_setup_frame(itv, args); 1175 ivtv_yuv_setup_frame(itv, args);
1168 return ivtv_yuv_udma_frame(itv, args); 1176 /* Wait for frame DMA. serialize_lock is held here, so
1177 drop it while we wait and retake it afterwards to let
1178 other processes access the driver in the meantime. */
1179 mutex_unlock(&itv->serialize_lock);
1180 res = ivtv_yuv_udma_frame(itv, args);
1181 mutex_lock(&itv->serialize_lock);
1182 return res;
1169} 1183}
1170 1184
1171void ivtv_yuv_close(struct ivtv *itv) 1185void ivtv_yuv_close(struct ivtv *itv)
@@ -1174,7 +1188,9 @@ void ivtv_yuv_close(struct ivtv *itv)
1174 int h_filter, v_filter_1, v_filter_2; 1188 int h_filter, v_filter_1, v_filter_2;
1175 1189
1176 IVTV_DEBUG_YUV("ivtv_yuv_close\n"); 1190 IVTV_DEBUG_YUV("ivtv_yuv_close\n");
1191 mutex_unlock(&itv->serialize_lock);
1177 ivtv_waitq(&itv->vsync_waitq); 1192 ivtv_waitq(&itv->vsync_waitq);
1193 mutex_lock(&itv->serialize_lock);
1178 1194
1179 yi->running = 0; 1195 yi->running = 0;
1180 atomic_set(&yi->next_dma_frame, -1); 1196 atomic_set(&yi->next_dma_frame, -1);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 0cb461dd396..74522773e93 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
287 sg_dma_len(sg) = new_size; 287 sg_dma_len(sg) = new_size;
288 288
289 txd = ichan->dma_chan.device->device_prep_slave_sg( 289 txd = ichan->dma_chan.device->device_prep_slave_sg(
290 &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE, 290 &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
291 DMA_PREP_INTERRUPT); 291 DMA_PREP_INTERRUPT);
292 if (!txd) 292 if (!txd)
293 goto error; 293 goto error;
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index a277f95091e..1fb7d5bd5ec 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1042,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh,
1042 strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver)); 1042 strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
1043 strlcpy(cap->card, vout->vfd->name, sizeof(cap->card)); 1043 strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
1044 cap->bus_info[0] = '\0'; 1044 cap->bus_info[0] = '\0';
1045 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT; 1045 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
1046 V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
1046 1047
1047 return 0; 1048 return 0;
1048} 1049}
@@ -1825,7 +1826,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
1825 ovid = &vout->vid_info; 1826 ovid = &vout->vid_info;
1826 ovl = ovid->overlays[0]; 1827 ovl = ovid->overlays[0];
1827 1828
1828 a->flags = 0x0; 1829 /* The video overlay must stay within the framebuffer and can't be
1830 positioned independently. */
1831 a->flags = V4L2_FBUF_FLAG_OVERLAY;
1829 a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY 1832 a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
1830 | V4L2_FBUF_CAP_SRC_CHROMAKEY; 1833 | V4L2_FBUF_CAP_SRC_CHROMAKEY;
1831 1834
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 905d41d90c6..1f506fde97d 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -104,47 +104,16 @@ static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
104 104
105/****************************************************************************/ 105/****************************************************************************/
106 106
107static int _send_control_msg(struct pwc_device *pdev,
108 u8 request, u16 value, int index, void *buf, int buflen)
109{
110 int rc;
111 void *kbuf = NULL;
112
113 if (buflen) {
114 kbuf = kmemdup(buf, buflen, GFP_KERNEL); /* not allowed on stack */
115 if (kbuf == NULL)
116 return -ENOMEM;
117 }
118
119 rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
120 request,
121 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
122 value,
123 index,
124 kbuf, buflen, USB_CTRL_SET_TIMEOUT);
125
126 kfree(kbuf);
127 return rc;
128}
129
130static int recv_control_msg(struct pwc_device *pdev, 107static int recv_control_msg(struct pwc_device *pdev,
131 u8 request, u16 value, void *buf, int buflen) 108 u8 request, u16 value, int recv_count)
132{ 109{
133 int rc; 110 int rc;
134 void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
135
136 if (kbuf == NULL)
137 return -ENOMEM;
138 111
139 rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0), 112 rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
140 request, 113 request,
141 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 114 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
142 value, 115 value, pdev->vcinterface,
143 pdev->vcinterface, 116 pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT);
144 kbuf, buflen, USB_CTRL_GET_TIMEOUT);
145 memcpy(buf, kbuf, buflen);
146 kfree(kbuf);
147
148 if (rc < 0) 117 if (rc < 0)
149 PWC_ERROR("recv_control_msg error %d req %02x val %04x\n", 118 PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
150 rc, request, value); 119 rc, request, value);
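
The pwc rework above drops the per-call kmemdup()/kmalloc() bounce buffers in favour of a single pdev->ctrl_buf reused for every transfer; as the removed comments note, USB control messages are "not allowed on stack", and allocating the buffer once (presumably at probe time, outside this hunk) removes an allocation and a failure path from every register access. A sketch of the receive side, with a stand-in for usb_control_msg():

    #include <string.h>

    struct dev {
    	unsigned char *ctrl_buf;	/* one DMA-safe buffer, reused */
    };

    /* stub standing in for usb_control_msg(); returns bytes read */
    static int usb_read(unsigned char *buf, int len) { (void)buf; return len; }

    static int recv_msg(struct dev *d, void *out, int len)
    {
    	int rc = usb_read(d->ctrl_buf, len);	/* no per-call kmalloc */

    	if (rc >= 0)
    		memcpy(out, d->ctrl_buf, rc);
    	return rc;
    }
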
@@ -152,27 +121,39 @@ static int recv_control_msg(struct pwc_device *pdev,
152} 121}
153 122
154static inline int send_video_command(struct pwc_device *pdev, 123static inline int send_video_command(struct pwc_device *pdev,
155 int index, void *buf, int buflen) 124 int index, const unsigned char *buf, int buflen)
156{ 125{
157 return _send_control_msg(pdev, 126 int rc;
158 SET_EP_STREAM_CTL, 127
159 VIDEO_OUTPUT_CONTROL_FORMATTER, 128 memcpy(pdev->ctrl_buf, buf, buflen);
160 index, 129
161 buf, buflen); 130 rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
131 SET_EP_STREAM_CTL,
132 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
133 VIDEO_OUTPUT_CONTROL_FORMATTER, index,
134 pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT);
135 if (rc >= 0)
136 memcpy(pdev->cmd_buf, buf, buflen);
137 else
138 PWC_ERROR("send_video_command error %d\n", rc);
139
140 return rc;
162} 141}
163 142
164int send_control_msg(struct pwc_device *pdev, 143int send_control_msg(struct pwc_device *pdev,
165 u8 request, u16 value, void *buf, int buflen) 144 u8 request, u16 value, void *buf, int buflen)
166{ 145{
167 return _send_control_msg(pdev, 146 return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
168 request, value, pdev->vcinterface, buf, buflen); 147 request,
148 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
149 value, pdev->vcinterface,
150 buf, buflen, USB_CTRL_SET_TIMEOUT);
169} 151}
170 152
171static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames, 153static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
172 int *compression) 154 int frames, int *compression, int send_to_cam)
173{ 155{
174 unsigned char buf[3]; 156 int fps, ret = 0;
175 int ret, fps;
176 struct Nala_table_entry *pEntry; 157 struct Nala_table_entry *pEntry;
177 int frames2frames[31] = 158 int frames2frames[31] =
178 { /* closest match of framerate */ 159 { /* closest match of framerate */
@@ -194,30 +175,29 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
194 7 /* 30 */ 175 7 /* 30 */
195 }; 176 };
196 177
197 if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25) 178 if (size < 0 || size > PSZ_CIF)
198 return -EINVAL; 179 return -EINVAL;
180 if (frames < 4)
181 frames = 4;
182 else if (frames > 25)
183 frames = 25;
199 frames = frames2frames[frames]; 184 frames = frames2frames[frames];
200 fps = frames2table[frames]; 185 fps = frames2table[frames];
201 pEntry = &Nala_table[size][fps]; 186 pEntry = &Nala_table[size][fps];
202 if (pEntry->alternate == 0) 187 if (pEntry->alternate == 0)
203 return -EINVAL; 188 return -EINVAL;
204 189
205 memcpy(buf, pEntry->mode, 3); 190 if (send_to_cam)
206 ret = send_video_command(pdev, pdev->vendpoint, buf, 3); 191 ret = send_video_command(pdev, pdev->vendpoint,
207 if (ret < 0) { 192 pEntry->mode, 3);
208 PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret); 193 if (ret < 0)
209 return ret; 194 return ret;
210 }
211 if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
212 ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
213 if (ret < 0)
214 return ret;
215 }
216 195
217 pdev->cmd_len = 3; 196 if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420)
218 memcpy(pdev->cmd_buf, buf, 3); 197 pwc_dec1_init(pdev, pEntry->mode);
219 198
220 /* Set various parameters */ 199 /* Set various parameters */
200 pdev->pixfmt = pixfmt;
221 pdev->vframes = frames; 201 pdev->vframes = frames;
222 pdev->valternate = pEntry->alternate; 202 pdev->valternate = pEntry->alternate;
223 pdev->width = pwc_image_sizes[size][0]; 203 pdev->width = pwc_image_sizes[size][0];
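[Note on the hunk above: the framerate handling changes from rejecting out-of-range requests with -EINVAL to clamping them into the range the Nala sensor supports (4-25 fps). A minimal sketch of the same policy, assuming only the kernel's clamp() helper from linux/kernel.h; the function name is illustrative, not a driver symbol.]

#include <linux/kernel.h>

/* Pull a requested framerate into the supported Nala range instead of
 * failing the ioctl. */
static int nala_clamp_fps(int frames)
{
	return clamp(frames, 4, 25);
}
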
@@ -243,18 +223,20 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
243} 223}
244 224
245 225
246static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, 226static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
247 int *compression) 227 int frames, int *compression, int send_to_cam)
248{ 228{
249 unsigned char buf[13];
250 const struct Timon_table_entry *pChoose; 229 const struct Timon_table_entry *pChoose;
251 int ret, fps; 230 int fps, ret = 0;
252 231
253 if (size >= PSZ_MAX || frames < 5 || frames > 30 || 232 if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
254 *compression < 0 || *compression > 3)
255 return -EINVAL;
256 if (size == PSZ_VGA && frames > 15)
257 return -EINVAL; 233 return -EINVAL;
234 if (frames < 5)
235 frames = 5;
236 else if (size == PSZ_VGA && frames > 15)
237 frames = 15;
238 else if (frames > 30)
239 frames = 30;
258 fps = (frames / 5) - 1; 240 fps = (frames / 5) - 1;
259 241
260 /* Find a supported framerate with progressively higher compression */ 242 /* Find a supported framerate with progressively higher compression */
@@ -268,22 +250,18 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
268 if (pChoose == NULL || pChoose->alternate == 0) 250 if (pChoose == NULL || pChoose->alternate == 0)
269 return -ENOENT; /* Not supported. */ 251 return -ENOENT; /* Not supported. */
270 252
271 memcpy(buf, pChoose->mode, 13); 253 if (send_to_cam)
272 ret = send_video_command(pdev, pdev->vendpoint, buf, 13); 254 ret = send_video_command(pdev, pdev->vendpoint,
255 pChoose->mode, 13);
273 if (ret < 0) 256 if (ret < 0)
274 return ret; 257 return ret;
275 258
276 if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) { 259 if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
277 ret = pwc_dec23_init(pdev, pdev->type, buf); 260 pwc_dec23_init(pdev, pChoose->mode);
278 if (ret < 0)
279 return ret;
280 }
281
282 pdev->cmd_len = 13;
283 memcpy(pdev->cmd_buf, buf, 13);
284 261
285 /* Set various parameters */ 262 /* Set various parameters */
286 pdev->vframes = frames; 263 pdev->pixfmt = pixfmt;
264 pdev->vframes = (fps + 1) * 5;
287 pdev->valternate = pChoose->alternate; 265 pdev->valternate = pChoose->alternate;
288 pdev->width = pwc_image_sizes[size][0]; 266 pdev->width = pwc_image_sizes[size][0];
289 pdev->height = pwc_image_sizes[size][1]; 267 pdev->height = pwc_image_sizes[size][1];
@@ -296,18 +274,20 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
296} 274}
297 275
298 276
299static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, 277static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
300 int *compression) 278 int frames, int *compression, int send_to_cam)
301{ 279{
302 const struct Kiara_table_entry *pChoose = NULL; 280 const struct Kiara_table_entry *pChoose = NULL;
303 int fps, ret; 281 int fps, ret = 0;
304 unsigned char buf[12];
305 282
306 if (size >= PSZ_MAX || frames < 5 || frames > 30 || 283 if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
307 *compression < 0 || *compression > 3)
308 return -EINVAL;
309 if (size == PSZ_VGA && frames > 15)
310 return -EINVAL; 284 return -EINVAL;
285 if (frames < 5)
286 frames = 5;
287 else if (size == PSZ_VGA && frames > 15)
288 frames = 15;
289 else if (frames > 30)
290 frames = 30;
311 fps = (frames / 5) - 1; 291 fps = (frames / 5) - 1;
312 292
313 /* Find a supported framerate with progressively higher compression */ 293 /* Find a supported framerate with progressively higher compression */
@@ -320,26 +300,18 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
320 if (pChoose == NULL || pChoose->alternate == 0) 300 if (pChoose == NULL || pChoose->alternate == 0)
321 return -ENOENT; /* Not supported. */ 301 return -ENOENT; /* Not supported. */
322 302
323 PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);
324
325 /* usb_control_msg won't take staticly allocated arrays as argument?? */
326 memcpy(buf, pChoose->mode, 12);
327
328 /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */ 303 /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
329 ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12); 304 if (send_to_cam)
305 ret = send_video_command(pdev, 4, pChoose->mode, 12);
330 if (ret < 0) 306 if (ret < 0)
331 return ret; 307 return ret;
332 308
333 if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) { 309 if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
334 ret = pwc_dec23_init(pdev, pdev->type, buf); 310 pwc_dec23_init(pdev, pChoose->mode);
335 if (ret < 0)
336 return ret;
337 }
338 311
339 pdev->cmd_len = 12;
340 memcpy(pdev->cmd_buf, buf, 12);
341 /* All set and go */ 312 /* All set and go */
342 pdev->vframes = frames; 313 pdev->pixfmt = pixfmt;
314 pdev->vframes = (fps + 1) * 5;
343 pdev->valternate = pChoose->alternate; 315 pdev->valternate = pChoose->alternate;
344 pdev->width = pwc_image_sizes[size][0]; 316 pdev->width = pwc_image_sizes[size][0];
345 pdev->height = pwc_image_sizes[size][1]; 317 pdev->height = pwc_image_sizes[size][1];
@@ -354,22 +326,24 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
354} 326}
355 327
356int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, 328int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
357 int frames, int *compression) 329 int pixfmt, int frames, int *compression, int send_to_cam)
358{ 330{
359 int ret, size; 331 int ret, size;
360 332
361 PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt); 333 PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n",
334 width, height, frames, pixfmt);
362 size = pwc_get_size(pdev, width, height); 335 size = pwc_get_size(pdev, width, height);
363 PWC_TRACE("decode_size = %d.\n", size); 336 PWC_TRACE("decode_size = %d.\n", size);
364 337
365 if (DEVICE_USE_CODEC1(pdev->type)) { 338 if (DEVICE_USE_CODEC1(pdev->type)) {
366 ret = set_video_mode_Nala(pdev, size, frames, compression); 339 ret = set_video_mode_Nala(pdev, size, pixfmt, frames,
367 340 compression, send_to_cam);
368 } else if (DEVICE_USE_CODEC3(pdev->type)) { 341 } else if (DEVICE_USE_CODEC3(pdev->type)) {
369 ret = set_video_mode_Kiara(pdev, size, frames, compression); 342 ret = set_video_mode_Kiara(pdev, size, pixfmt, frames,
370 343 compression, send_to_cam);
371 } else { 344 } else {
372 ret = set_video_mode_Timon(pdev, size, frames, compression); 345 ret = set_video_mode_Timon(pdev, size, pixfmt, frames,
346 compression, send_to_cam);
373 } 347 }
374 if (ret < 0) { 348 if (ret < 0) {
375 PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret); 349 PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
@@ -436,13 +410,12 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
436int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) 410int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
437{ 411{
438 int ret; 412 int ret;
439 u8 buf;
440 413
441 ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf)); 414 ret = recv_control_msg(pdev, request, value, 1);
442 if (ret < 0) 415 if (ret < 0)
443 return ret; 416 return ret;
444 417
445 *data = buf; 418 *data = pdev->ctrl_buf[0];
446 return 0; 419 return 0;
447} 420}
448 421
@@ -450,7 +423,8 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
450{ 423{
451 int ret; 424 int ret;
452 425
453 ret = send_control_msg(pdev, request, value, &data, sizeof(data)); 426 pdev->ctrl_buf[0] = data;
427 ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
454 if (ret < 0) 428 if (ret < 0)
455 return ret; 429 return ret;
456 430
@@ -460,37 +434,34 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
460int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) 434int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
461{ 435{
462 int ret; 436 int ret;
463 s8 buf;
464 437
465 ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf)); 438 ret = recv_control_msg(pdev, request, value, 1);
466 if (ret < 0) 439 if (ret < 0)
467 return ret; 440 return ret;
468 441
469 *data = buf; 442 *data = ((s8 *)pdev->ctrl_buf)[0];
470 return 0; 443 return 0;
471} 444}
472 445
473int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) 446int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
474{ 447{
475 int ret; 448 int ret;
476 u8 buf[2];
477 449
478 ret = recv_control_msg(pdev, request, value, buf, sizeof(buf)); 450 ret = recv_control_msg(pdev, request, value, 2);
479 if (ret < 0) 451 if (ret < 0)
480 return ret; 452 return ret;
481 453
482 *data = (buf[1] << 8) | buf[0]; 454 *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0];
483 return 0; 455 return 0;
484} 456}
485 457
486int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data) 458int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
487{ 459{
488 int ret; 460 int ret;
489 u8 buf[2];
490 461
491 buf[0] = data & 0xff; 462 pdev->ctrl_buf[0] = data & 0xff;
492 buf[1] = data >> 8; 463 pdev->ctrl_buf[1] = data >> 8;
493 ret = send_control_msg(pdev, request, value, buf, sizeof(buf)); 464 ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2);
494 if (ret < 0) 465 if (ret < 0)
495 return ret; 466 return ret;
496 467
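[Note: pwc_get_u16_ctrl()/pwc_set_u16_ctrl() above assemble and split the camera's little-endian 16-bit values by hand through ctrl_buf. The kernel's unaligned accessors express the same byte order explicitly; a sketch assuming asm/unaligned.h, with illustrative helper names.]

#include <linux/types.h>
#include <asm/unaligned.h>

/* Equivalent little-endian packing/unpacking of a 16-bit control value:
 * buf[0] carries the low byte, buf[1] the high byte. */
static void pack_le16(u8 *buf, u16 val)
{
	put_unaligned_le16(val, buf);
}

static u16 unpack_le16(const u8 *buf)
{
	return get_unaligned_le16(buf);
}
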
@@ -511,7 +482,6 @@ int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
511/* POWER */ 482/* POWER */
512void pwc_camera_power(struct pwc_device *pdev, int power) 483void pwc_camera_power(struct pwc_device *pdev, int power)
513{ 484{
514 char buf;
515 int r; 485 int r;
516 486
517 if (!pdev->power_save) 487 if (!pdev->power_save)
@@ -521,13 +491,11 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
521 return; /* Not supported by Nala or Timon < release 6 */ 491 return; /* Not supported by Nala or Timon < release 6 */
522 492
523 if (power) 493 if (power)
524 buf = 0x00; /* active */ 494 pdev->ctrl_buf[0] = 0x00; /* active */
525 else 495 else
526 buf = 0xFF; /* power save */ 496 pdev->ctrl_buf[0] = 0xFF; /* power save */
527 r = send_control_msg(pdev, 497 r = send_control_msg(pdev, SET_STATUS_CTL,
528 SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER, 498 SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1);
529 &buf, sizeof(buf));
530
531 if (r < 0) 499 if (r < 0)
532 PWC_ERROR("Failed to power %s camera (%d)\n", 500 PWC_ERROR("Failed to power %s camera (%d)\n",
533 power ? "on" : "off", r); 501 power ? "on" : "off", r);
@@ -535,7 +503,6 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
535 503
536int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value) 504int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
537{ 505{
538 unsigned char buf[2];
539 int r; 506 int r;
540 507
541 if (pdev->type < 730) 508 if (pdev->type < 730)
@@ -551,11 +518,11 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
551 if (off_value > 0xff) 518 if (off_value > 0xff)
552 off_value = 0xff; 519 off_value = 0xff;
553 520
554 buf[0] = on_value; 521 pdev->ctrl_buf[0] = on_value;
555 buf[1] = off_value; 522 pdev->ctrl_buf[1] = off_value;
556 523
557 r = send_control_msg(pdev, 524 r = send_control_msg(pdev,
558 SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf)); 525 SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2);
559 if (r < 0) 526 if (r < 0)
560 PWC_ERROR("Failed to set LED on/off time (%d)\n", r); 527 PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
561 528
@@ -565,7 +532,6 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
565#ifdef CONFIG_USB_PWC_DEBUG 532#ifdef CONFIG_USB_PWC_DEBUG
566int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor) 533int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
567{ 534{
568 unsigned char buf;
569 int ret = -1, request; 535 int ret = -1, request;
570 536
571 if (pdev->type < 675) 537 if (pdev->type < 675)
@@ -575,14 +541,13 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
575 else 541 else
576 request = SENSOR_TYPE_FORMATTER2; 542 request = SENSOR_TYPE_FORMATTER2;
577 543
578 ret = recv_control_msg(pdev, 544 ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1);
579 GET_STATUS_CTL, request, &buf, sizeof(buf));
580 if (ret < 0) 545 if (ret < 0)
581 return ret; 546 return ret;
582 if (pdev->type < 675) 547 if (pdev->type < 675)
583 *sensor = buf | 0x100; 548 *sensor = pdev->ctrl_buf[0] | 0x100;
584 else 549 else
585 *sensor = buf; 550 *sensor = pdev->ctrl_buf[0];
586 return 0; 551 return 0;
587} 552}
588#endif 553#endif
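[Note: one idea runs through all of pwc-ctrl.c above: buffers handed to usb_control_msg() may be DMA-mapped, so they must live in kmalloc'd memory, never on the stack. The old code bounced every transfer through a fresh kmemdup()/kmalloc(); the patch replaces that with the single preallocated pdev->ctrl_buf. A reduced sketch of the bounce-buffer pattern being removed; example_send_ctrl() is illustrative, not a driver symbol.]

#include <linux/slab.h>
#include <linux/usb.h>

/* Stage caller data in a DMA-capable heap copy before the control
 * transfer, then free it once the transfer completes. */
static int example_send_ctrl(struct usb_device *udev, u8 request,
			     u16 value, u16 index, const void *data, int len)
{
	void *kbuf = kmemdup(data, len, GFP_KERNEL);
	int rc;

	if (!kbuf)
		return -ENOMEM;
	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			     value, index, kbuf, len, USB_CTRL_SET_TIMEOUT);
	kfree(kbuf);
	return rc;
}

Preallocating one buffer trades those per-call allocations for the requirement that callers serialize access while ctrl_buf is in flight.
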
diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c
index be0e02cb487..e899036aadf 100644
--- a/drivers/media/video/pwc/pwc-dec1.c
+++ b/drivers/media/video/pwc/pwc-dec1.c
@@ -22,19 +22,11 @@
22 along with this program; if not, write to the Free Software 22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24*/ 24*/
25#include "pwc-dec1.h" 25#include "pwc.h"
26 26
27int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer) 27void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd)
28{ 28{
29 struct pwc_dec1_private *pdec; 29 struct pwc_dec1_private *pdec = &pdev->dec1;
30 30
31 if (pwc->decompress_data == NULL) { 31 pdec->version = pdev->release;
32 pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
33 if (pdec == NULL)
34 return -ENOMEM;
35 pwc->decompress_data = pdec;
36 }
37 pdec = pwc->decompress_data;
38
39 return 0;
40} 32}
diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h
index a57d8601080..c565ef8f52f 100644
--- a/drivers/media/video/pwc/pwc-dec1.h
+++ b/drivers/media/video/pwc/pwc-dec1.h
@@ -25,13 +25,15 @@
25#ifndef PWC_DEC1_H 25#ifndef PWC_DEC1_H
26#define PWC_DEC1_H 26#define PWC_DEC1_H
27 27
28#include "pwc.h" 28#include <linux/mutex.h>
29
30struct pwc_device;
29 31
30struct pwc_dec1_private 32struct pwc_dec1_private
31{ 33{
32 int version; 34 int version;
33}; 35};
34 36
35int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer); 37void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd);
36 38
37#endif 39#endif
diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c
index 2c6709112b2..3792fedff95 100644
--- a/drivers/media/video/pwc/pwc-dec23.c
+++ b/drivers/media/video/pwc/pwc-dec23.c
@@ -294,22 +294,17 @@ static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE];
294 294
295 295
296/* If the type or the command change, we rebuild the lookup table */ 296/* If the type or the command change, we rebuild the lookup table */
297int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd) 297void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
298{ 298{
299 int flags, version, shift, i; 299 int flags, version, shift, i;
300 struct pwc_dec23_private *pdec; 300 struct pwc_dec23_private *pdec = &pdev->dec23;
301
302 if (pwc->decompress_data == NULL) {
303 pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
304 if (pdec == NULL)
305 return -ENOMEM;
306 pwc->decompress_data = pdec;
307 }
308 pdec = pwc->decompress_data;
309 301
310 mutex_init(&pdec->lock); 302 mutex_init(&pdec->lock);
311 303
312 if (DEVICE_USE_CODEC3(type)) { 304 if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
305 return;
306
307 if (DEVICE_USE_CODEC3(pdev->type)) {
313 flags = cmd[2] & 0x18; 308 flags = cmd[2] & 0x18;
314 if (flags == 8) 309 if (flags == 8)
315 pdec->nbits = 7; /* More bits, mean more bits to encode the stream, but better quality */ 310 pdec->nbits = 7; /* More bits, mean more bits to encode the stream, but better quality */
@@ -356,7 +351,8 @@ int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
356 pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255; 351 pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
357#endif 352#endif
358 353
359 return 0; 354 pdec->last_cmd = cmd[2];
355 pdec->last_cmd_valid = 1;
360} 356}
361 357
362/* 358/*
@@ -659,12 +655,12 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
659 * src: raw data 655 * src: raw data
660 * dst: image output 656 * dst: image output
661 */ 657 */
662void pwc_dec23_decompress(const struct pwc_device *pwc, 658void pwc_dec23_decompress(struct pwc_device *pdev,
663 const void *src, 659 const void *src,
664 void *dst) 660 void *dst)
665{ 661{
666 int bandlines_left, bytes_per_block; 662 int bandlines_left, bytes_per_block;
667 struct pwc_dec23_private *pdec = pwc->decompress_data; 663 struct pwc_dec23_private *pdec = &pdev->dec23;
668 664
669 /* YUV420P image format */ 665 /* YUV420P image format */
670 unsigned char *pout_planar_y; 666 unsigned char *pout_planar_y;
@@ -674,23 +670,22 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
674 670
675 mutex_lock(&pdec->lock); 671 mutex_lock(&pdec->lock);
676 672
677 bandlines_left = pwc->height / 4; 673 bandlines_left = pdev->height / 4;
678 bytes_per_block = pwc->width * 4; 674 bytes_per_block = pdev->width * 4;
679 plane_size = pwc->height * pwc->width; 675 plane_size = pdev->height * pdev->width;
680 676
681 pout_planar_y = dst; 677 pout_planar_y = dst;
682 pout_planar_u = dst + plane_size; 678 pout_planar_u = dst + plane_size;
683 pout_planar_v = dst + plane_size + plane_size / 4; 679 pout_planar_v = dst + plane_size + plane_size / 4;
684 680
685 while (bandlines_left--) { 681 while (bandlines_left--) {
686 DecompressBand23(pwc->decompress_data, 682 DecompressBand23(pdec, src,
687 src,
688 pout_planar_y, pout_planar_u, pout_planar_v, 683 pout_planar_y, pout_planar_u, pout_planar_v,
689 pwc->width, pwc->width); 684 pdev->width, pdev->width);
690 src += pwc->vbandlength; 685 src += pdev->vbandlength;
691 pout_planar_y += bytes_per_block; 686 pout_planar_y += bytes_per_block;
692 pout_planar_u += pwc->width; 687 pout_planar_u += pdev->width;
693 pout_planar_v += pwc->width; 688 pout_planar_v += pdev->width;
694 } 689 }
695 mutex_unlock(&pdec->lock); 690 mutex_unlock(&pdec->lock);
696} 691}
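[Note: pwc_dec23_init() above gains a small memoization guard: the expensive lookup tables depend only on cmd[2], so they are rebuilt only when that byte differs from the cached copy. The shape of the guard in isolation, assuming the last_cmd/last_cmd_valid fields added in the header below; the function name is illustrative.]

/* Rebuild-only-on-change guard; table generation elided. */
static void rebuild_if_needed(struct pwc_dec23_private *pdec,
			      const unsigned char *cmd)
{
	if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
		return;			/* tables already match this mode */

	/* ... regenerate lookup tables from cmd[2] ... */

	pdec->last_cmd = cmd[2];
	pdec->last_cmd_valid = 1;
}
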
diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h
index d64a3c281af..c655b1c1e6a 100644
--- a/drivers/media/video/pwc/pwc-dec23.h
+++ b/drivers/media/video/pwc/pwc-dec23.h
@@ -25,17 +25,20 @@
25#ifndef PWC_DEC23_H 25#ifndef PWC_DEC23_H
26#define PWC_DEC23_H 26#define PWC_DEC23_H
27 27
28#include "pwc.h" 28struct pwc_device;
29 29
30struct pwc_dec23_private 30struct pwc_dec23_private
31{ 31{
32 struct mutex lock; 32 struct mutex lock;
33 33
34 unsigned char last_cmd, last_cmd_valid;
35
34 unsigned int scalebits; 36 unsigned int scalebits;
35 unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */ 37 unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */
36 38
37 unsigned int reservoir; 39 unsigned int reservoir;
38 unsigned int nbits_in_reservoir; 40 unsigned int nbits_in_reservoir;
41
39 const unsigned char *stream; 42 const unsigned char *stream;
40 int temp_colors[16]; 43 int temp_colors[16];
41 44
@@ -51,8 +54,8 @@ struct pwc_dec23_private
51 54
52}; 55};
53 56
54int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd); 57void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd);
55void pwc_dec23_decompress(const struct pwc_device *pwc, 58void pwc_dec23_decompress(struct pwc_device *pdev,
56 const void *src, 59 const void *src,
57 void *dst); 60 void *dst);
58#endif 61#endif
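[Note: two related moves show up in this header: the include of pwc.h is replaced by a bare `struct pwc_device;` forward declaration, which breaks the circular include once pwc.h itself starts including the decoder headers, and (in pwc.h further down) the kmalloc'd `void *decompress_data` becomes an anonymous union embedded in the device. A sketch of the combined pattern with an illustrative struct name.]

struct pwc_device;		/* forward declaration, no pwc.h needed */

/* A camera is either a codec-1 or a codec-2/3 device, so the two
 * private states can share storage instead of a separately allocated
 * void pointer. */
struct example_device_state {
	int type;
	union {
		struct pwc_dec1_private dec1;
		struct pwc_dec23_private dec23;
	};
};

Embedding the union removes an allocation failure path and the matching kfree() in the release handler.
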
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 943d37ad0d3..122fbd0081e 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -128,18 +128,11 @@ static struct usb_driver pwc_driver = {
128#define MAX_DEV_HINTS 20 128#define MAX_DEV_HINTS 20
129#define MAX_ISOC_ERRORS 20 129#define MAX_ISOC_ERRORS 20
130 130
131static int default_fps = 10;
132#ifdef CONFIG_USB_PWC_DEBUG 131#ifdef CONFIG_USB_PWC_DEBUG
133 int pwc_trace = PWC_DEBUG_LEVEL; 132 int pwc_trace = PWC_DEBUG_LEVEL;
134#endif 133#endif
135static int power_save = -1; 134static int power_save = -1;
136static int led_on = 100, led_off; /* defaults to LED that is on while in use */ 135static int leds[2] = { 100, 0 };
137static struct {
138 int type;
139 char serial_number[30];
140 int device_node;
141 struct pwc_device *pdev;
142} device_hint[MAX_DEV_HINTS];
143 136
144/***/ 137/***/
145 138
@@ -386,8 +379,8 @@ static int pwc_isoc_init(struct pwc_device *pdev)
386retry: 379retry:
387 /* We first try with low compression and then retry with a higher 380 /* We first try with low compression and then retry with a higher
388 compression setting if there is not enough bandwidth. */ 381 compression setting if there is not enough bandwidth. */
389 ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, 382 ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
390 pdev->vframes, &compression); 383 pdev->vframes, &compression, 1);
391 384
392 /* Get the current alternate interface, adjust packet size */ 385 /* Get the current alternate interface, adjust packet size */
393 intf = usb_ifnum_to_if(udev, 0); 386 intf = usb_ifnum_to_if(udev, 0);
@@ -597,23 +590,9 @@ leave:
597static void pwc_video_release(struct v4l2_device *v) 590static void pwc_video_release(struct v4l2_device *v)
598{ 591{
599 struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev); 592 struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
600 int hint;
601
602 /* search device_hint[] table if we occupy a slot, by any chance */
603 for (hint = 0; hint < MAX_DEV_HINTS; hint++)
604 if (device_hint[hint].pdev == pdev)
605 device_hint[hint].pdev = NULL;
606
607 /* Free intermediate decompression buffer & tables */
608 if (pdev->decompress_data != NULL) {
609 PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
610 pdev->decompress_data);
611 kfree(pdev->decompress_data);
612 pdev->decompress_data = NULL;
613 }
614 593
615 v4l2_ctrl_handler_free(&pdev->ctrl_handler); 594 v4l2_ctrl_handler_free(&pdev->ctrl_handler);
616 595 kfree(pdev->ctrl_buf);
617 kfree(pdev); 596 kfree(pdev);
618} 597}
619 598
@@ -758,7 +737,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
758 737
759 /* Turn on camera and set LEDS on */ 738 /* Turn on camera and set LEDS on */
760 pwc_camera_power(pdev, 1); 739 pwc_camera_power(pdev, 1);
761 pwc_set_leds(pdev, led_on, led_off); 740 pwc_set_leds(pdev, leds[0], leds[1]);
762 741
763 r = pwc_isoc_init(pdev); 742 r = pwc_isoc_init(pdev);
764 if (r) { 743 if (r) {
@@ -813,10 +792,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
813 struct usb_device *udev = interface_to_usbdev(intf); 792 struct usb_device *udev = interface_to_usbdev(intf);
814 struct pwc_device *pdev = NULL; 793 struct pwc_device *pdev = NULL;
815 int vendor_id, product_id, type_id; 794 int vendor_id, product_id, type_id;
816 int hint, rc; 795 int rc;
817 int features = 0; 796 int features = 0;
818 int compression = 0; 797 int compression = 0;
819 int video_nr = -1; /* default: use next available device */
820 int my_power_save = power_save; 798 int my_power_save = power_save;
821 char serial_number[30], *name; 799 char serial_number[30], *name;
822 800
@@ -1076,7 +1054,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1076 return -ENOMEM; 1054 return -ENOMEM;
1077 } 1055 }
1078 pdev->type = type_id; 1056 pdev->type = type_id;
1079 pdev->vframes = default_fps;
1080 pdev->features = features; 1057 pdev->features = features;
1081 pwc_construct(pdev); /* set min/max sizes correct */ 1058 pwc_construct(pdev); /* set min/max sizes correct */
1082 1059
@@ -1107,24 +1084,14 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1107 pdev->release = le16_to_cpu(udev->descriptor.bcdDevice); 1084 pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
1108 PWC_DEBUG_PROBE("Release: %04x\n", pdev->release); 1085 PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
1109 1086
1110 /* Now search device_hint[] table for a match, so we can hint a node number. */ 1087 /* Allocate USB command buffers */
1111 for (hint = 0; hint < MAX_DEV_HINTS; hint++) { 1088 pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL);
1112 if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) && 1089 if (!pdev->ctrl_buf) {
1113 (device_hint[hint].pdev == NULL)) { 1090 PWC_ERROR("Oops, could not allocate memory for pwc_device.\n");
1114 /* so far, so good... try serial number */ 1091 rc = -ENOMEM;
1115 if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) { 1092 goto err_free_mem;
1116 /* match! */
1117 video_nr = device_hint[hint].device_node;
1118 PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr);
1119 break;
1120 }
1121 }
1122 } 1093 }
1123 1094
1124 /* occupy slot */
1125 if (hint < MAX_DEV_HINTS)
1126 device_hint[hint].pdev = pdev;
1127
1128#ifdef CONFIG_USB_PWC_DEBUG 1095#ifdef CONFIG_USB_PWC_DEBUG
1129 /* Query sensor type */ 1096 /* Query sensor type */
1130 if (pwc_get_cmos_sensor(pdev, &rc) >= 0) { 1097 if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
@@ -1138,8 +1105,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1138 pwc_set_leds(pdev, 0, 0); 1105 pwc_set_leds(pdev, 0, 0);
1139 1106
1140 /* Setup intial videomode */ 1107 /* Setup intial videomode */
1141 rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, pdev->vframes, 1108 rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
1142 &compression); 1109 V4L2_PIX_FMT_YUV420, 30, &compression, 1);
1143 if (rc) 1110 if (rc)
1144 goto err_free_mem; 1111 goto err_free_mem;
1145 1112
@@ -1164,7 +1131,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1164 pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler; 1131 pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
1165 pdev->vdev.v4l2_dev = &pdev->v4l2_dev; 1132 pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
1166 1133
1167 rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr); 1134 rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
1168 if (rc < 0) { 1135 if (rc < 0) {
1169 PWC_ERROR("Failed to register as video device (%d).\n", rc); 1136 PWC_ERROR("Failed to register as video device (%d).\n", rc);
1170 goto err_unregister_v4l2_dev; 1137 goto err_unregister_v4l2_dev;
@@ -1207,8 +1174,7 @@ err_unregister_v4l2_dev:
1207err_free_controls: 1174err_free_controls:
1208 v4l2_ctrl_handler_free(&pdev->ctrl_handler); 1175 v4l2_ctrl_handler_free(&pdev->ctrl_handler);
1209err_free_mem: 1176err_free_mem:
1210 if (hint < MAX_DEV_HINTS) 1177 kfree(pdev->ctrl_buf);
1211 device_hint[hint].pdev = NULL;
1212 kfree(pdev); 1178 kfree(pdev);
1213 return rc; 1179 return rc;
1214} 1180}
@@ -1243,27 +1209,19 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
1243 * Initialization code & module stuff 1209 * Initialization code & module stuff
1244 */ 1210 */
1245 1211
1246static int fps;
1247static int leds[2] = { -1, -1 };
1248static unsigned int leds_nargs; 1212static unsigned int leds_nargs;
1249static char *dev_hint[MAX_DEV_HINTS];
1250static unsigned int dev_hint_nargs;
1251 1213
1252module_param(fps, int, 0444);
1253#ifdef CONFIG_USB_PWC_DEBUG 1214#ifdef CONFIG_USB_PWC_DEBUG
1254module_param_named(trace, pwc_trace, int, 0644); 1215module_param_named(trace, pwc_trace, int, 0644);
1255#endif 1216#endif
1256module_param(power_save, int, 0644); 1217module_param(power_save, int, 0644);
1257module_param_array(leds, int, &leds_nargs, 0444); 1218module_param_array(leds, int, &leds_nargs, 0444);
1258module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
1259 1219
1260MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
1261#ifdef CONFIG_USB_PWC_DEBUG 1220#ifdef CONFIG_USB_PWC_DEBUG
1262MODULE_PARM_DESC(trace, "For debugging purposes"); 1221MODULE_PARM_DESC(trace, "For debugging purposes");
1263#endif 1222#endif
1264MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off"); 1223MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
1265MODULE_PARM_DESC(leds, "LED on,off time in milliseconds"); 1224MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
1266MODULE_PARM_DESC(dev_hint, "Device node hints");
1267 1225
1268MODULE_DESCRIPTION("Philips & OEM USB webcam driver"); 1226MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
1269MODULE_AUTHOR("Luc Saillard <luc@saillard.org>"); 1227MODULE_AUTHOR("Luc Saillard <luc@saillard.org>");
@@ -1273,114 +1231,13 @@ MODULE_VERSION( PWC_VERSION );
1273 1231
1274static int __init usb_pwc_init(void) 1232static int __init usb_pwc_init(void)
1275{ 1233{
1276 int i;
1277
1278#ifdef CONFIG_USB_PWC_DEBUG
1279 PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
1280 PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
1281 PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
1282 PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
1283
1284 if (pwc_trace >= 0) {
1285 PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
1286 }
1287#endif
1288
1289 if (fps) {
1290 if (fps < 4 || fps > 30) {
1291 PWC_ERROR("Framerate out of bounds (4-30).\n");
1292 return -EINVAL;
1293 }
1294 default_fps = fps;
1295 PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
1296 }
1297
1298 if (leds[0] >= 0)
1299 led_on = leds[0];
1300 if (leds[1] >= 0)
1301 led_off = leds[1];
1302
1303 /* Big device node whoopla. Basically, it allows you to assign a
1304 device node (/dev/videoX) to a camera, based on its type
1305 & serial number. The format is [type[.serialnumber]:]node.
1306
1307 Any camera that isn't matched by these rules gets the next
1308 available free device node.
1309 */
1310 for (i = 0; i < MAX_DEV_HINTS; i++) {
1311 char *s, *colon, *dot;
1312
1313 /* This loop also initializes the array */
1314 device_hint[i].pdev = NULL;
1315 s = dev_hint[i];
1316 if (s != NULL && *s != '\0') {
1317 device_hint[i].type = -1; /* wildcard */
1318 strcpy(device_hint[i].serial_number, "*");
1319
1320 /* parse string: chop at ':' & '/' */
1321 colon = dot = s;
1322 while (*colon != '\0' && *colon != ':')
1323 colon++;
1324 while (*dot != '\0' && *dot != '.')
1325 dot++;
1326 /* Few sanity checks */
1327 if (*dot != '\0' && dot > colon) {
1328 PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n");
1329 return -EINVAL;
1330 }
1331
1332 if (*colon == '\0') {
1333 /* No colon */
1334 if (*dot != '\0') {
1335 PWC_ERROR("Malformed camera hint: no colon + device node given.\n");
1336 return -EINVAL;
1337 }
1338 else {
1339 /* No type or serial number specified, just a number. */
1340 device_hint[i].device_node =
1341 simple_strtol(s, NULL, 10);
1342 }
1343 }
1344 else {
1345 /* There's a colon, so we have at least a type and a device node */
1346 device_hint[i].type =
1347 simple_strtol(s, NULL, 10);
1348 device_hint[i].device_node =
1349 simple_strtol(colon + 1, NULL, 10);
1350 if (*dot != '\0') {
1351 /* There's a serial number as well */
1352 int k;
1353
1354 dot++;
1355 k = 0;
1356 while (*dot != ':' && k < 29) {
1357 device_hint[i].serial_number[k++] = *dot;
1358 dot++;
1359 }
1360 device_hint[i].serial_number[k] = '\0';
1361 }
1362 }
1363 PWC_TRACE("device_hint[%d]:\n", i);
1364 PWC_TRACE(" type : %d\n", device_hint[i].type);
1365 PWC_TRACE(" serial# : %s\n", device_hint[i].serial_number);
1366 PWC_TRACE(" node : %d\n", device_hint[i].device_node);
1367 }
1368 else
1369 device_hint[i].type = 0; /* not filled */
1370 } /* ..for MAX_DEV_HINTS */
1371
1372 PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver);
1373 return usb_register(&pwc_driver); 1234 return usb_register(&pwc_driver);
1374} 1235}
1375 1236
1376static void __exit usb_pwc_exit(void) 1237static void __exit usb_pwc_exit(void)
1377{ 1238{
1378 PWC_DEBUG_MODULE("Deregistering driver.\n");
1379 usb_deregister(&pwc_driver); 1239 usb_deregister(&pwc_driver);
1380 PWC_INFO("Philips webcam module removed.\n");
1381} 1240}
1382 1241
1383module_init(usb_pwc_init); 1242module_init(usb_pwc_init);
1384module_exit(usb_pwc_exit); 1243module_exit(usb_pwc_exit);
1385
1386/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
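[Note: the probe rework above is also a tidy example of the kernel's goto-unwind error style: the new ctrl_buf allocation slots into the existing err_free_mem label, so every exit frees exactly what was acquired so far. Reduced sketch with hypothetical resources and names.]

#include <linux/slab.h>

static int example_register(void)	/* stand-in for the final step */
{
	return 0;
}

static int example_probe(void)
{
	void *dev, *ctrl_buf;
	int rc;

	dev = kzalloc(64, GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	ctrl_buf = kmalloc(13, GFP_KERNEL);
	if (!ctrl_buf) {
		rc = -ENOMEM;
		goto err_free_dev;	/* unwind only what exists so far */
	}

	rc = example_register();
	if (rc)
		goto err_free_buf;
	return 0;

err_free_buf:
	kfree(ctrl_buf);
err_free_dev:
	kfree(dev);
	return rc;
}
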
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 23a55b5814f..9be5adffa87 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -90,5 +90,4 @@ void pwc_construct(struct pwc_device *pdev)
90 pdev->frame_header_size = 0; 90 pdev->frame_header_size = 0;
91 pdev->frame_trailer_size = 0; 91 pdev->frame_trailer_size = 0;
92 } 92 }
93 pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
94} 93}
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 80e25842e84..f495eeb5403 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -493,16 +493,11 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
493 (pixelformat>>24)&255); 493 (pixelformat>>24)&255);
494 494
495 ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height, 495 ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height,
496 pdev->vframes, &compression); 496 pixelformat, 30, &compression, 0);
497 497
498 PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret); 498 PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
499 499
500 if (ret == 0) { 500 pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
501 pdev->pixfmt = pixelformat;
502 pwc_vidioc_fill_fmt(f, pdev->width, pdev->height,
503 pdev->pixfmt);
504 }
505
506leave: 501leave:
507 mutex_unlock(&pdev->udevlock); 502 mutex_unlock(&pdev->udevlock);
508 return ret; 503 return ret;
@@ -777,33 +772,33 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev)
777static int pwc_set_motor(struct pwc_device *pdev) 772static int pwc_set_motor(struct pwc_device *pdev)
778{ 773{
779 int ret; 774 int ret;
780 u8 buf[4];
781 775
782 buf[0] = 0; 776 pdev->ctrl_buf[0] = 0;
783 if (pdev->motor_pan_reset->is_new) 777 if (pdev->motor_pan_reset->is_new)
784 buf[0] |= 0x01; 778 pdev->ctrl_buf[0] |= 0x01;
785 if (pdev->motor_tilt_reset->is_new) 779 if (pdev->motor_tilt_reset->is_new)
786 buf[0] |= 0x02; 780 pdev->ctrl_buf[0] |= 0x02;
787 if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) { 781 if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
788 ret = send_control_msg(pdev, SET_MPT_CTL, 782 ret = send_control_msg(pdev, SET_MPT_CTL,
789 PT_RESET_CONTROL_FORMATTER, buf, 1); 783 PT_RESET_CONTROL_FORMATTER,
784 pdev->ctrl_buf, 1);
790 if (ret < 0) 785 if (ret < 0)
791 return ret; 786 return ret;
792 } 787 }
793 788
794 memset(buf, 0, sizeof(buf)); 789 memset(pdev->ctrl_buf, 0, 4);
795 if (pdev->motor_pan->is_new) { 790 if (pdev->motor_pan->is_new) {
796 buf[0] = pdev->motor_pan->val & 0xFF; 791 pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF;
797 buf[1] = (pdev->motor_pan->val >> 8); 792 pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8);
798 } 793 }
799 if (pdev->motor_tilt->is_new) { 794 if (pdev->motor_tilt->is_new) {
800 buf[2] = pdev->motor_tilt->val & 0xFF; 795 pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF;
801 buf[3] = (pdev->motor_tilt->val >> 8); 796 pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8);
802 } 797 }
803 if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) { 798 if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
804 ret = send_control_msg(pdev, SET_MPT_CTL, 799 ret = send_control_msg(pdev, SET_MPT_CTL,
805 PT_RELATIVE_CONTROL_FORMATTER, 800 PT_RELATIVE_CONTROL_FORMATTER,
806 buf, sizeof(buf)); 801 pdev->ctrl_buf, 4);
807 if (ret < 0) 802 if (ret < 0)
808 return ret; 803 return ret;
809 } 804 }
@@ -1094,6 +1089,63 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
1094 return 0; 1089 return 0;
1095} 1090}
1096 1091
1092static int pwc_g_parm(struct file *file, void *fh,
1093 struct v4l2_streamparm *parm)
1094{
1095 struct pwc_device *pdev = video_drvdata(file);
1096
1097 if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1098 return -EINVAL;
1099
1100 memset(parm, 0, sizeof(*parm));
1101
1102 parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1103 parm->parm.capture.readbuffers = MIN_FRAMES;
1104 parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
1105 parm->parm.capture.timeperframe.denominator = pdev->vframes;
1106 parm->parm.capture.timeperframe.numerator = 1;
1107
1108 return 0;
1109}
1110
1111static int pwc_s_parm(struct file *file, void *fh,
1112 struct v4l2_streamparm *parm)
1113{
1114 struct pwc_device *pdev = video_drvdata(file);
1115 int compression = 0;
1116 int ret, fps;
1117
1118 if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1119 parm->parm.capture.timeperframe.numerator == 0)
1120 return -EINVAL;
1121
1122 if (pwc_test_n_set_capt_file(pdev, file))
1123 return -EBUSY;
1124
1125 fps = parm->parm.capture.timeperframe.denominator /
1126 parm->parm.capture.timeperframe.numerator;
1127
1128 mutex_lock(&pdev->udevlock);
1129 if (!pdev->udev) {
1130 ret = -ENODEV;
1131 goto leave;
1132 }
1133
1134 if (pdev->iso_init) {
1135 ret = -EBUSY;
1136 goto leave;
1137 }
1138
1139 ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
1140 fps, &compression, 0);
1141
1142 pwc_g_parm(file, fh, parm);
1143
1144leave:
1145 mutex_unlock(&pdev->udevlock);
1146 return ret;
1147}
1148
1097static int pwc_log_status(struct file *file, void *priv) 1149static int pwc_log_status(struct file *file, void *priv)
1098{ 1150{
1099 struct pwc_device *pdev = video_drvdata(file); 1151 struct pwc_device *pdev = video_drvdata(file);
@@ -1120,4 +1172,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
1120 .vidioc_log_status = pwc_log_status, 1172 .vidioc_log_status = pwc_log_status,
1121 .vidioc_enum_framesizes = pwc_enum_framesizes, 1173 .vidioc_enum_framesizes = pwc_enum_framesizes,
1122 .vidioc_enum_frameintervals = pwc_enum_frameintervals, 1174 .vidioc_enum_frameintervals = pwc_enum_frameintervals,
1175 .vidioc_g_parm = pwc_g_parm,
1176 .vidioc_s_parm = pwc_s_parm,
1123}; 1177};
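[Note: the new pwc_g_parm()/pwc_s_parm() handlers above expose the frame rate through V4L2's time-per-frame fraction: 30 fps travels as numerator 1, denominator 30, and s_parm recovers an integer rate by dividing denominator by numerator after rejecting a zero numerator. The conversion in isolation; the helper name is illustrative.]

#include <linux/videodev2.h>

/* Integer fps from a v4l2 time-per-frame fraction; mirrors the
 * arithmetic in pwc_s_parm() above. */
static int tpf_to_fps(const struct v4l2_fract *tpf)
{
	if (tpf->numerator == 0)
		return -EINVAL;
	return tpf->denominator / tpf->numerator;
}
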
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 47c518fef17..e4d4d711dd1 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -44,6 +44,8 @@
44#ifdef CONFIG_USB_PWC_INPUT_EVDEV 44#ifdef CONFIG_USB_PWC_INPUT_EVDEV
45#include <linux/input.h> 45#include <linux/input.h>
46#endif 46#endif
47#include "pwc-dec1.h"
48#include "pwc-dec23.h"
47 49
48/* Version block */ 50/* Version block */
49#define PWC_VERSION "10.0.15" 51#define PWC_VERSION "10.0.15"
@@ -132,9 +134,6 @@
132#define DEVICE_USE_CODEC3(x) ((x)>=700) 134#define DEVICE_USE_CODEC3(x) ((x)>=700)
133#define DEVICE_USE_CODEC23(x) ((x)>=675) 135#define DEVICE_USE_CODEC23(x) ((x)>=675)
134 136
135/* from pwc-dec.h */
136#define PWCX_FLAG_PLANAR 0x0001
137
138/* Request types: video */ 137/* Request types: video */
139#define SET_LUM_CTL 0x01 138#define SET_LUM_CTL 0x01
140#define GET_LUM_CTL 0x02 139#define GET_LUM_CTL 0x02
@@ -248,8 +247,8 @@ struct pwc_device
248 char vmirror; /* for ToUCaM series */ 247 char vmirror; /* for ToUCaM series */
249 char power_save; /* Do powersaving for this cam */ 248 char power_save; /* Do powersaving for this cam */
250 249
251 int cmd_len;
252 unsigned char cmd_buf[13]; 250 unsigned char cmd_buf[13];
251 unsigned char *ctrl_buf;
253 252
254 struct urb *urbs[MAX_ISO_BUFS]; 253 struct urb *urbs[MAX_ISO_BUFS];
255 char iso_init; 254 char iso_init;
@@ -272,7 +271,10 @@ struct pwc_device
272 int frame_total_size; /* including header & trailer */ 271 int frame_total_size; /* including header & trailer */
273 int drop_frames; 272 int drop_frames;
274 273
275 void *decompress_data; /* private data for decompression engine */ 274 union { /* private data for decompression engine */
275 struct pwc_dec1_private dec1;
276 struct pwc_dec23_private dec23;
277 };
276 278
277 /* 279 /*
278 * We have an 'image' and a 'view', where 'image' is the fixed-size img 280 * We have an 'image' and a 'view', where 'image' is the fixed-size img
@@ -364,7 +366,7 @@ void pwc_construct(struct pwc_device *pdev);
364/** Functions in pwc-ctrl.c */ 366/** Functions in pwc-ctrl.c */
365/* Request a certain video mode. Returns < 0 if not possible */ 367/* Request a certain video mode. Returns < 0 if not possible */
366extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, 368extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
367 int frames, int *compression); 369 int pixfmt, int frames, int *compression, int send_to_cam);
368extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size); 370extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
369extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value); 371extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
370extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor); 372extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 510cfab477f..a9e9653beeb 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -693,7 +693,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
693 mf->code = 0; 693 mf->code = 0;
694 continue; 694 continue;
695 } 695 }
696 if (mf->width != tfmt->width || mf->width != tfmt->width) { 696 if (mf->width != tfmt->width || mf->height != tfmt->height) {
697 u32 fcc = ffmt->fourcc; 697 u32 fcc = ffmt->fourcc;
698 tfmt->width = mf->width; 698 tfmt->width = mf->width;
699 tfmt->height = mf->height; 699 tfmt->height = mf->height;
@@ -702,7 +702,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
702 NULL, &fcc, FIMC_SD_PAD_SOURCE); 702 NULL, &fcc, FIMC_SD_PAD_SOURCE);
703 if (ffmt && ffmt->mbus_code) 703 if (ffmt && ffmt->mbus_code)
704 mf->code = ffmt->mbus_code; 704 mf->code = ffmt->mbus_code;
705 if (mf->width != tfmt->width || mf->width != tfmt->width) 705 if (mf->width != tfmt->width ||
706 mf->height != tfmt->height)
706 continue; 707 continue;
707 tfmt->code = mf->code; 708 tfmt->code = mf->code;
708 } 709 }
@@ -710,7 +711,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
710 ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt); 711 ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
711 712
712 if (mf->code == tfmt->code && 713 if (mf->code == tfmt->code &&
713 mf->width == tfmt->width && mf->width == tfmt->width) 714 mf->width == tfmt->width && mf->height == tfmt->height)
714 break; 715 break;
715 } 716 }
716 717
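[Note: all three hunks above repair the same copy-paste slip: both operands of the test compared width ("mf->width != tfmt->width || mf->width != tfmt->width"), so a height mismatch was never caught. A tiny helper is one way to make that class of bug harder to retype; illustrative only. The identical pattern is fixed again in s5p_mfc_handle_seq_done() further down ("ctx->img_width == 0 || ctx->img_width == 0").]

#include <linux/types.h>

/* Compare a full frame size in one place instead of two hand-written
 * pairs of fields. */
static bool same_frame_size(u32 w1, u32 h1, u32 w2, u32 h2)
{
	return w1 == w2 && h1 == h2;
}
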
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index f5cbb8a4c54..81bcbb9492e 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -848,11 +848,11 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
848 v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4); 848 v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
849 849
850 ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, 850 ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
851 V4L2_CID_HFLIP, 0, 1, 1, 0); 851 V4L2_CID_ROTATE, 0, 270, 90, 0);
852 ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, 852 ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
853 V4L2_CID_VFLIP, 0, 1, 1, 0); 853 V4L2_CID_HFLIP, 0, 1, 1, 0);
854 ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, 854 ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
855 V4L2_CID_ROTATE, 0, 270, 90, 0); 855 V4L2_CID_VFLIP, 0, 1, 1, 0);
856 if (variant->has_alpha) 856 if (variant->has_alpha)
857 ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler, 857 ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
858 &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 858 &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 615c862f036..8ea4ee116e4 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -21,7 +21,6 @@
21#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/version.h>
25#include <media/v4l2-ctrls.h> 24#include <media/v4l2-ctrls.h>
26#include <media/media-device.h> 25#include <media/media-device.h>
27 26
diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c
index c40b0dde188..febaa673d36 100644
--- a/drivers/media/video/s5p-g2d/g2d.c
+++ b/drivers/media/video/s5p-g2d/g2d.c
@@ -184,6 +184,7 @@ static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
184 ctx->rop = ROP4_INVERT; 184 ctx->rop = ROP4_INVERT;
185 else 185 else
186 ctx->rop = ROP4_COPY; 186 ctx->rop = ROP4_COPY;
187 break;
187 default: 188 default:
188 v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n"); 189 v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n");
189 return -EINVAL; 190 return -EINVAL;
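[Note: the g2d fix above is a single missing break statement: without it, control fell out of the last real case into default, logging "unknown control" and returning -EINVAL for a value that had just been handled. Reduced to its essence with hypothetical names and values.]

#include <linux/errno.h>

static int example_s_ctrl(int id, int *rop)
{
	switch (id) {
	case 1:
		*rop = 0xCC;
		break;		/* the patch adds exactly this statement */
	default:
		return -EINVAL;	/* previously reached even for id == 1 */
	}
	return 0;
}
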
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c
index f841a3e9845..1105a8749c8 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -989,9 +989,10 @@ static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
989 * ============================================================================ 989 * ============================================================================
990 */ 990 */
991 991
992static int s5p_jpeg_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, 992static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
993 unsigned int *nplanes, unsigned int sizes[], 993 const struct v4l2_format *fmt,
994 void *alloc_ctxs[]) 994 unsigned int *nbuffers, unsigned int *nplanes,
995 unsigned int sizes[], void *alloc_ctxs[])
995{ 996{
996 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq); 997 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
997 struct s5p_jpeg_q_data *q_data = NULL; 998 struct s5p_jpeg_q_data *q_data = NULL;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
index e43e128baf5..83fe461af26 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -18,7 +18,6 @@
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/version.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/workqueue.h> 22#include <linux/workqueue.h>
24#include <media/videobuf2-core.h> 23#include <media/videobuf2-core.h>
@@ -475,7 +474,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
475 ctx->mv_size = 0; 474 ctx->mv_size = 0;
476 } 475 }
477 ctx->dpb_count = s5p_mfc_get_dpb_count(); 476 ctx->dpb_count = s5p_mfc_get_dpb_count();
478 if (ctx->img_width == 0 || ctx->img_width == 0) 477 if (ctx->img_width == 0 || ctx->img_height == 0)
479 ctx->state = MFCINST_ERROR; 478 ctx->state = MFCINST_ERROR;
480 else 479 else
481 ctx->state = MFCINST_HEAD_PARSED; 480 ctx->state = MFCINST_HEAD_PARSED;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 844a4d7797b..c25ec022d26 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -165,7 +165,7 @@ static struct mfc_control controls[] = {
165 .maximum = 32, 165 .maximum = 32,
166 .step = 1, 166 .step = 1,
167 .default_value = 1, 167 .default_value = 1,
168 .flags = V4L2_CTRL_FLAG_VOLATILE, 168 .is_volatile = 1,
169 }, 169 },
170}; 170};
171 171
diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c
index 971591d6450..5b72da5ce41 100644
--- a/drivers/media/video/saa7164/saa7164-cards.c
+++ b/drivers/media/video/saa7164/saa7164-cards.c
@@ -269,8 +269,6 @@ struct saa7164_board saa7164_boards[] = {
269 .portb = SAA7164_MPEG_DVB, 269 .portb = SAA7164_MPEG_DVB,
270 .portc = SAA7164_MPEG_ENCODER, 270 .portc = SAA7164_MPEG_ENCODER,
271 .portd = SAA7164_MPEG_ENCODER, 271 .portd = SAA7164_MPEG_ENCODER,
272 .portc = SAA7164_MPEG_ENCODER,
273 .portd = SAA7164_MPEG_ENCODER,
274 .porte = SAA7164_MPEG_VBI, 272 .porte = SAA7164_MPEG_VBI,
275 .portf = SAA7164_MPEG_VBI, 273 .portf = SAA7164_MPEG_VBI,
276 .chiprev = SAA7164_CHIP_REV3, 274 .chiprev = SAA7164_CHIP_REV3,
@@ -333,8 +331,6 @@ struct saa7164_board saa7164_boards[] = {
333 .portd = SAA7164_MPEG_ENCODER, 331 .portd = SAA7164_MPEG_ENCODER,
334 .porte = SAA7164_MPEG_VBI, 332 .porte = SAA7164_MPEG_VBI,
335 .portf = SAA7164_MPEG_VBI, 333 .portf = SAA7164_MPEG_VBI,
336 .porte = SAA7164_MPEG_VBI,
337 .portf = SAA7164_MPEG_VBI,
338 .chiprev = SAA7164_CHIP_REV3, 334 .chiprev = SAA7164_CHIP_REV3,
339 .unit = {{ 335 .unit = {{
340 .id = 0x28, 336 .id = 0x28,
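[Note: the removed .portc/.portd and .porte/.portf lines above were duplicate designated initializers. C permits them, and the last one listed silently wins (GCC only complains under -Woverride-init), so the duplicates were harmless but dead weight. Demonstration with an illustrative struct.]

struct ports { int portc, portd; };

static struct ports example = {
	.portc = 1,
	.portc = 2,	/* overrides the previous line without an error */
	.portd = 3,
};
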
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index 0a2d75f0406..4ed1c7c28ae 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
565 spin_unlock_irq(&fh->queue_lock); 565 spin_unlock_irq(&fh->queue_lock);
566 566
567 desc = fh->chan->device->device_prep_slave_sg(fh->chan, 567 desc = fh->chan->device->device_prep_slave_sg(fh->chan,
568 buf->sg, sg_elems, DMA_FROM_DEVICE, 568 buf->sg, sg_elems, DMA_DEV_TO_MEM,
569 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 569 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
570 if (!desc) { 570 if (!desc) {
571 spin_lock_irq(&fh->queue_lock); 571 spin_lock_irq(&fh->queue_lock);
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 129f135d5a5..c096b3f7420 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -374,7 +374,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
374} 374}
375#endif 375#endif
376 376
377static bool check_firmware(struct usb_device *udev, int *down_firmware) 377static int check_firmware(struct usb_device *udev, int *down_firmware)
378{ 378{
379 void *buf; 379 void *buf;
380 int ret; 380 int ret;
@@ -398,7 +398,7 @@ static bool check_firmware(struct usb_device *udev, int *down_firmware)
398 *down_firmware = 1; 398 *down_firmware = 1;
399 return firmware_download(udev); 399 return firmware_download(udev);
400 } 400 }
401 return ret; 401 return 0;
402} 402}
403 403
404static int poseidon_probe(struct usb_interface *interface, 404static int poseidon_probe(struct usb_interface *interface,
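[Note: the tlg2300 change above corrects a return type rather than the success path: check_firmware() can propagate a negative errno (or the result of firmware_download()), and returning that through bool collapses any nonzero value to true (1). Minimal illustration of the lossy conversion; function names are hypothetical.]

#include <linux/errno.h>
#include <linux/types.h>

static bool lossy(void)    { return -ENODEV; }	/* converts to true (1) */
static int  lossless(void) { return -ENODEV; }	/* stays -19 */
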
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index da1f4c2d2d4..cccd42be718 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -465,8 +465,8 @@ const char *v4l2_ctrl_get_name(u32 id)
 	case V4L2_CID_CHROMA_GAIN:		return "Chroma Gain";
 	case V4L2_CID_ILLUMINATORS_1:		return "Illuminator 1";
 	case V4L2_CID_ILLUMINATORS_2:		return "Illuminator 2";
-	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:	return "Minimum Number of Capture Buffers";
-	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:	return "Minimum Number of Output Buffers";
+	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:	return "Min Number of Capture Buffers";
+	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:	return "Min Number of Output Buffers";
 	case V4L2_CID_ALPHA_COMPONENT:		return "Alpha Component";
 
 	/* MPEG controls */
@@ -506,25 +506,25 @@ const char *v4l2_ctrl_get_name(u32 id)
 	case V4L2_CID_MPEG_VIDEO_MUTE_YUV:	return "Video Mute YUV";
 	case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:	return "Decoder Slice Interface";
 	case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:	return "MPEG4 Loop Filter Enable";
-	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:	return "The Number of Intra Refresh MBs";
+	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:	return "Number of Intra Refresh MBs";
 	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:	return "Frame Level Rate Control Enable";
 	case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:	return "H264 MB Level Rate Control";
 	case V4L2_CID_MPEG_VIDEO_HEADER_MODE:	return "Sequence Header Mode";
-	case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:	return "The Max Number of Reference Picture";
+	case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:	return "Max Number of Reference Pics";
 	case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:	return "H263 I-Frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:	return "H263 P frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:	return "H263 B frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:	return "H263 P-Frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:	return "H263 B-Frame QP Value";
 	case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:	return "H263 Minimum QP Value";
 	case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:	return "H263 Maximum QP Value";
 	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:	return "H264 I-Frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:	return "H264 P frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:	return "H264 B frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:	return "H264 P-Frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:	return "H264 B-Frame QP Value";
 	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:	return "H264 Maximum QP Value";
 	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:	return "H264 Minimum QP Value";
 	case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:	return "H264 8x8 Transform Enable";
 	case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:	return "H264 CPB Buffer Size";
-	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:	return "H264 Entorpy Mode";
-	case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:	return "H264 I Period";
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:	return "H264 Entropy Mode";
+	case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:	return "H264 I-Frame Period";
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:	return "H264 Level";
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:	return "H264 Loop Filter Alpha Offset";
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:	return "H264 Loop Filter Beta Offset";
@@ -535,16 +535,16 @@ const char *v4l2_ctrl_get_name(u32 id)
 	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:	return "Aspect Ratio VUI Enable";
 	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:	return "VUI Aspect Ratio IDC";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:	return "MPEG4 I-Frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:	return "MPEG4 P frame QP Value";
-	case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:	return "MPEG4 B frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:	return "MPEG4 P-Frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:	return "MPEG4 B-Frame QP Value";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:	return "MPEG4 Minimum QP Value";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:	return "MPEG4 Maximum QP Value";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:	return "MPEG4 Level";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:	return "MPEG4 Profile";
 	case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:	return "Quarter Pixel Search Enable";
-	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:	return "The Maximum Bytes Per Slice";
-	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:	return "The Number of MB in a Slice";
-	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:	return "The Slice Partitioning Method";
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:	return "Maximum Bytes in a Slice";
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:	return "Number of MBs in a Slice";
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:	return "Slice Partitioning Method";
 	case V4L2_CID_MPEG_VIDEO_VBV_SIZE:	return "VBV Buffer Size";
 
 	/* CAMERA controls */
@@ -580,7 +580,7 @@ const char *v4l2_ctrl_get_name(u32 id)
 	case V4L2_CID_AUDIO_LIMITER_ENABLED:	return "Audio Limiter Feature Enabled";
 	case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
 	case V4L2_CID_AUDIO_LIMITER_DEVIATION:	return "Audio Limiter Deviation";
-	case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
+	case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
 	case V4L2_CID_AUDIO_COMPRESSION_GAIN:	return "Audio Compression Gain";
 	case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
 	case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
@@ -588,24 +588,24 @@ const char *v4l2_ctrl_get_name(u32 id)
 	case V4L2_CID_PILOT_TONE_ENABLED:	return "Pilot Tone Feature Enabled";
 	case V4L2_CID_PILOT_TONE_DEVIATION:	return "Pilot Tone Deviation";
 	case V4L2_CID_PILOT_TONE_FREQUENCY:	return "Pilot Tone Frequency";
-	case V4L2_CID_TUNE_PREEMPHASIS:		return "Pre-emphasis settings";
+	case V4L2_CID_TUNE_PREEMPHASIS:		return "Pre-Emphasis";
 	case V4L2_CID_TUNE_POWER_LEVEL:		return "Tune Power Level";
 	case V4L2_CID_TUNE_ANTENNA_CAPACITOR:	return "Tune Antenna Capacitor";
 
 	/* Flash controls */
-	case V4L2_CID_FLASH_CLASS:		return "Flash controls";
-	case V4L2_CID_FLASH_LED_MODE:		return "LED mode";
-	case V4L2_CID_FLASH_STROBE_SOURCE:	return "Strobe source";
+	case V4L2_CID_FLASH_CLASS:		return "Flash Controls";
+	case V4L2_CID_FLASH_LED_MODE:		return "LED Mode";
+	case V4L2_CID_FLASH_STROBE_SOURCE:	return "Strobe Source";
 	case V4L2_CID_FLASH_STROBE:		return "Strobe";
-	case V4L2_CID_FLASH_STROBE_STOP:	return "Stop strobe";
-	case V4L2_CID_FLASH_STROBE_STATUS:	return "Strobe status";
-	case V4L2_CID_FLASH_TIMEOUT:		return "Strobe timeout";
-	case V4L2_CID_FLASH_INTENSITY:		return "Intensity, flash mode";
-	case V4L2_CID_FLASH_TORCH_INTENSITY:	return "Intensity, torch mode";
-	case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+	case V4L2_CID_FLASH_STROBE_STOP:	return "Stop Strobe";
+	case V4L2_CID_FLASH_STROBE_STATUS:	return "Strobe Status";
+	case V4L2_CID_FLASH_TIMEOUT:		return "Strobe Timeout";
+	case V4L2_CID_FLASH_INTENSITY:		return "Intensity, Flash Mode";
+	case V4L2_CID_FLASH_TORCH_INTENSITY:	return "Intensity, Torch Mode";
+	case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
 	case V4L2_CID_FLASH_FAULT:		return "Faults";
 	case V4L2_CID_FLASH_CHARGE:		return "Charge";
-	case V4L2_CID_FLASH_READY:		return "Ready to strobe";
+	case V4L2_CID_FLASH_READY:		return "Ready to Strobe";
 
 	default:
 		return NULL;
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 77feeb67e2d..3f623859a33 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1871,6 +1871,7 @@ static long __video_do_ioctl(struct file *file,
 	case VIDIOC_S_FREQUENCY:
 	{
 		struct v4l2_frequency *p = arg;
+		enum v4l2_tuner_type type;
 
 		if (!ops->vidioc_s_frequency)
 			break;
@@ -1878,9 +1879,14 @@ static long __video_do_ioctl(struct file *file,
 			ret = ret_prio;
 			break;
 		}
+		type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
 		dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
 				p->tuner, p->type, p->frequency);
-		ret = ops->vidioc_s_frequency(file, fh, p);
+		if (p->type != type)
+			ret = -EINVAL;
+		else
+			ret = ops->vidioc_s_frequency(file, fh, p);
 		break;
 	}
 	case VIDIOC_G_SLICED_VBI_CAP:
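The VIDIOC_S_FREQUENCY hunk pins the requested tuner type to the type implied by the device node before calling into the driver. A self-contained sketch of the same validation idea (simplified types, not the kernel's):

#include <stdbool.h>

enum tuner_type { TUNER_RADIO = 1, TUNER_ANALOG_TV = 2 };
enum node_type  { NODE_VIDEO, NODE_RADIO };

/* A radio node only accepts radio-tuner requests and a video node only
 * TV-tuner requests; anything else is rejected (-EINVAL in the ioctl). */
static bool s_frequency_type_ok(enum node_type node, enum tuner_type req)
{
	enum tuner_type expected =
		(node == NODE_RADIO) ? TUNER_RADIO : TUNER_ANALOG_TV;
	return req == expected;
}

int main(void)
{
	return s_frequency_type_ok(NODE_RADIO, TUNER_RADIO) ? 0 : 1;
}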
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index f6d26419445..4c09ab781ec 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1958,7 +1958,6 @@ static int zoran_g_fbuf(struct file *file, void *__fh,
 	mutex_unlock(&zr->resource_lock);
 	fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
 	fb->fmt.field = V4L2_FIELD_INTERLACED;
-	fb->flags = V4L2_FBUF_FLAG_OVERLAY;
 	fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
 
 	return 0;
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index eb5cd28bc6d..a2d25e4857e 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
 	 * transaction, and then put it under external control
 	 */
 	memset(&config, 0, sizeof(config));
-	config.direction = DMA_TO_DEVICE;
+	config.direction = DMA_MEM_TO_DEV;
 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 	config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
 	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index a7ee5027146..fcfe1eb5acc 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	struct scatterlist		*sg;
 	unsigned int			i;
 	enum dma_data_direction		direction;
+	enum dma_transfer_direction	slave_dirn;
 	unsigned int			sglen;
 	u32				iflags;
 
@@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	if (host->caps.has_dma)
 		atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
 
-	if (data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ) {
 		direction = DMA_FROM_DEVICE;
-	else
+		slave_dirn = DMA_DEV_TO_MEM;
+	} else {
 		direction = DMA_TO_DEVICE;
+		slave_dirn = DMA_MEM_TO_DEV;
+	}
 
 	sglen = dma_map_sg(chan->device->dev, data->sg,
 			data->sg_len, direction);
 
 	desc = chan->device->device_prep_slave_sg(chan,
-			data->sg, sglen, direction,
+			data->sg, sglen, slave_dirn,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc)
 		goto unmap_exit;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ece03b491c7..0d955ffaf44 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	struct dma_chan *chan;
 	struct dma_device *device;
 	struct dma_async_tx_descriptor *desc;
+	enum dma_data_direction buffer_dirn;
 	int nr_sg;
 
 	/* Check if next job is already prepared */
@@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	}
 
 	if (data->flags & MMC_DATA_READ) {
-		conf.direction = DMA_FROM_DEVICE;
+		conf.direction = DMA_DEV_TO_MEM;
+		buffer_dirn = DMA_FROM_DEVICE;
 		chan = host->dma_rx_channel;
 	} else {
-		conf.direction = DMA_TO_DEVICE;
+		conf.direction = DMA_MEM_TO_DEV;
+		buffer_dirn = DMA_TO_DEVICE;
 		chan = host->dma_tx_channel;
 	}
 
@@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 		return -EINVAL;
 
 	device = chan->device;
-	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
 	if (nr_sg == 0)
 		return -EINVAL;
 
@@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 unmap_exit:
 	if (!next)
 		dmaengine_terminate_all(chan);
-	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
 	return -ENOMEM;
 }
 
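A pattern worth naming in these MMC host hunks: dma_map_sg() keeps taking an enum dma_data_direction, while dma_slave_config.direction and device_prep_slave_sg() switch to enum dma_transfer_direction, so each driver now derives both values from one read/write decision. A hedged sketch of that mapping (the enum bodies are as assumed from the headers of this era, and the helper is illustrative):

#include <stdbool.h>

/* From the DMA-mapping API: how the CPU<->device buffer is synced. */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE,
			  DMA_FROM_DEVICE, DMA_NONE };

/* From the dmaengine API: which way the slave transfer itself runs. */
enum dma_transfer_direction { DMA_MEM_TO_MEM, DMA_MEM_TO_DEV,
			      DMA_DEV_TO_MEM, DMA_DEV_TO_DEV,
			      DMA_TRANS_NONE };

/* Derive both directions from one "is this a read?" decision, the way
 * the mmci/atmel-mci/mxcmmc hunks do. */
static void pick_directions(bool is_read,
			    enum dma_data_direction *buf_dirn,
			    enum dma_transfer_direction *slave_dirn)
{
	if (is_read) {
		*buf_dirn   = DMA_FROM_DEVICE;	/* for dma_map_sg() */
		*slave_dirn = DMA_DEV_TO_MEM;	/* for device_prep_slave_sg() */
	} else {
		*buf_dirn   = DMA_TO_DEVICE;
		*slave_dirn = DMA_MEM_TO_DEV;
	}
}

int main(void)
{
	enum dma_data_direction b;
	enum dma_transfer_direction s;
	pick_directions(true, &b, &s);
	return (b == DMA_FROM_DEVICE && s == DMA_DEV_TO_MEM) ? 0 : 1;
}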
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 7088b40f957..4184b7946bb 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 	unsigned int blksz = data->blksz;
 	unsigned int datasize = nob * blksz;
 	struct scatterlist *sg;
+	enum dma_transfer_direction slave_dirn;
 	int i, nents;
 
 	if (data->flags & MMC_DATA_STREAM)
@@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 		}
 	}
 
-	if (data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ) {
 		host->dma_dir = DMA_FROM_DEVICE;
-	else
+		slave_dirn = DMA_DEV_TO_MEM;
+	} else {
 		host->dma_dir = DMA_TO_DEVICE;
+		slave_dirn = DMA_MEM_TO_DEV;
+	}
 
 	nents = dma_map_sg(host->dma->device->dev, data->sg,
 			   data->sg_len, host->dma_dir);
@@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 		return -EINVAL;
 
 	host->desc = host->dma->device->device_prep_slave_sg(host->dma,
-		data->sg, data->sg_len, host->dma_dir,
+		data->sg, data->sg_len, slave_dirn,
 		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	if (!host->desc) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 4e2e019dd5c..382c835d217 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -154,6 +154,7 @@ struct mxs_mmc_host {
 	struct dma_chan			*dmach;
 	struct mxs_dma_data		dma_data;
 	unsigned int			dma_dir;
+	enum dma_transfer_direction	slave_dirn;
 	u32				ssp_pio_words[SSP_PIO_NUM];
 
 	unsigned int			version;
@@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
 	}
 
 	desc = host->dmach->device->device_prep_slave_sg(host->dmach,
-				sgl, sg_len, host->dma_dir, append);
+				sgl, sg_len, host->slave_dirn, append);
 	if (desc) {
 		desc->callback = mxs_mmc_dma_irq_callback;
 		desc->callback_param = host;
@@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
 	host->ssp_pio_words[1] = cmd0;
 	host->ssp_pio_words[2] = cmd1;
 	host->dma_dir = DMA_NONE;
+	host->slave_dirn = DMA_TRANS_NONE;
 	desc = mxs_mmc_prep_dma(host, 0);
 	if (!desc)
 		goto out;
@@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
 	host->ssp_pio_words[1] = cmd0;
 	host->ssp_pio_words[2] = cmd1;
 	host->dma_dir = DMA_NONE;
+	host->slave_dirn = DMA_TRANS_NONE;
 	desc = mxs_mmc_prep_dma(host, 0);
 	if (!desc)
 		goto out;
@@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 	int i;
 
 	unsigned short dma_data_dir, timeout;
+	enum dma_transfer_direction slave_dirn;
 	unsigned int data_size = 0, log2_blksz;
 	unsigned int blocks = data->blocks;
 
@@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 
 	if (data->flags & MMC_DATA_WRITE) {
 		dma_data_dir = DMA_TO_DEVICE;
+		slave_dirn = DMA_MEM_TO_DEV;
 		read = 0;
 	} else {
 		dma_data_dir = DMA_FROM_DEVICE;
+		slave_dirn = DMA_DEV_TO_MEM;
 		read = BM_SSP_CTRL0_READ;
 	}
 
@@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 	host->ssp_pio_words[1] = cmd0;
 	host->ssp_pio_words[2] = cmd1;
 	host->dma_dir = DMA_NONE;
+	host->slave_dirn = DMA_TRANS_NONE;
 	desc = mxs_mmc_prep_dma(host, 0);
 	if (!desc)
 		goto out;
@@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 	WARN_ON(host->data != NULL);
 	host->data = data;
 	host->dma_dir = dma_data_dir;
+	host->slave_dirn = slave_dirn;
 	desc = mxs_mmc_prep_dma(host, 1);
 	if (!desc)
 		goto out;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 4a2c5b2355f..f5d8b53be33 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 	if (ret > 0) {
 		host->dma_active = true;
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	}
 
 	if (desc) {
@@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 	if (ret > 0) {
 		host->dma_active = true;
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	}
 
 	if (desc) {
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 86f259cdfcb..7a6e6cc8f8b 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_CTRL_ACK);
+			DMA_DEV_TO_MEM, DMA_CTRL_ACK);
 
 	if (desc) {
 		cookie = dmaengine_submit(desc);
@@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_CTRL_ACK);
+			DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 
 	if (desc) {
 		cookie = dmaengine_submit(desc);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 2a56fc6f399..7f680420bfa 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -827,7 +827,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
 	pio[1] = pio[2] = 0;
 	desc = channel->device->device_prep_slave_sg(channel,
 					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_NONE, 0);
+					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
 	if (!desc) {
 		pr_err("step 1 error\n");
 		return -1;
@@ -839,7 +839,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
 	sg_init_one(sgl, this->cmd_buffer, this->command_length);
 	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
 	desc = channel->device->device_prep_slave_sg(channel,
-					sgl, 1, DMA_TO_DEVICE, 1);
+					sgl, 1, DMA_MEM_TO_DEV, 1);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -872,7 +872,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
 	pio[1] = 0;
 	desc = channel->device->device_prep_slave_sg(channel,
 					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_NONE, 0);
+					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
 	if (!desc) {
 		pr_err("step 1 error\n");
 		return -1;
@@ -881,7 +881,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
 	/* [2] send DMA request */
 	prepare_data_dma(this, DMA_TO_DEVICE);
 	desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
-					1, DMA_TO_DEVICE, 1);
+					1, DMA_MEM_TO_DEV, 1);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -908,7 +908,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
 	pio[1] = 0;
 	desc = channel->device->device_prep_slave_sg(channel,
 					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_NONE, 0);
+					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
 	if (!desc) {
 		pr_err("step 1 error\n");
 		return -1;
@@ -917,7 +917,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
 	/* [2] : send DMA request */
 	prepare_data_dma(this, DMA_FROM_DEVICE);
 	desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
-					1, DMA_FROM_DEVICE, 1);
+					1, DMA_DEV_TO_MEM, 1);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -964,7 +964,7 @@ int gpmi_send_page(struct gpmi_nand_data *this,
 
 	desc = channel->device->device_prep_slave_sg(channel,
 					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_NONE, 0);
+					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -998,7 +998,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
 		| BF_GPMI_CTRL0_XFER_COUNT(0);
 	pio[1] = 0;
 	desc = channel->device->device_prep_slave_sg(channel,
-				(struct scatterlist *)pio, 2, DMA_NONE, 0);
+				(struct scatterlist *)pio, 2,
+				DMA_TRANS_NONE, 0);
 	if (!desc) {
 		pr_err("step 1 error\n");
 		return -1;
@@ -1027,7 +1028,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
 	pio[5] = auxiliary;
 	desc = channel->device->device_prep_slave_sg(channel,
 					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_NONE, 1);
+					ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
 	if (!desc) {
 		pr_err("step 2 error\n");
 		return -1;
@@ -1045,7 +1046,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
 		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
 	pio[1] = 0;
 	desc = channel->device->device_prep_slave_sg(channel,
-				(struct scatterlist *)pio, 2, DMA_NONE, 1);
+				(struct scatterlist *)pio, 2,
+				DMA_TRANS_NONE, 1);
 	if (!desc) {
 		pr_err("step 3 error\n");
 		return -1;
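The GPMI substitutions are not cosmetic renames. With the enum bodies assumed in the earlier sketch, DMA_NONE has the value 3, which in enum dma_transfer_direction is DMA_DEV_TO_DEV, so passing the old constant where the new type is expected would silently request a device-to-device transfer instead of "no transfer". A small check of that aliasing (values assumed from the headers of this era; verify against your tree):

#include <assert.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE,
			  DMA_FROM_DEVICE, DMA_NONE };
enum dma_transfer_direction { DMA_MEM_TO_MEM, DMA_MEM_TO_DEV,
			      DMA_DEV_TO_MEM, DMA_DEV_TO_DEV,
			      DMA_TRANS_NONE };

int main(void)
{
	/* The read/write constants happen to line up across the enums... */
	assert((int)DMA_TO_DEVICE == (int)DMA_MEM_TO_DEV);
	assert((int)DMA_FROM_DEVICE == (int)DMA_DEV_TO_MEM);
	/* ...but the "no transfer" ones do not. */
	assert((int)DMA_NONE == (int)DMA_DEV_TO_DEV);
	assert((int)DMA_NONE != (int)DMA_TRANS_NONE);
	return 0;
}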
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index a688b9d975a..f99c6e312a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -365,13 +365,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
 
 	if (cmd->autoneg == AUTONEG_ENABLE) {
+		u32 an_supported_speed = bp->port.supported[cfg_idx];
+		if (bp->link_params.phy[EXT_PHY1].type ==
+		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+			an_supported_speed |= (SUPPORTED_100baseT_Half |
+					       SUPPORTED_100baseT_Full);
 		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
 			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
 			return -EINVAL;
 		}
 
 		/* advertise the requested speed and duplex if supported */
-		if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+		if (cmd->advertising & ~an_supported_speed) {
 			DP(NETIF_MSG_LINK, "Advertisement parameters "
 					   "are not supported\n");
 			return -EINVAL;
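The widened advertising check reduces to one mask test: reject any advertised bit outside the (possibly PHY-extended) supported set. A compact sketch of that rule (the bit values are placeholders, not the ethtool constants):

#include <stdbool.h>
#include <stdint.h>

#define SUP_100_HALF	(1u << 1)
#define SUP_100_FULL	(1u << 2)

/* Mirrors the `cmd->advertising & ~an_supported_speed` test above. */
static bool advertising_ok(uint32_t advertising, uint32_t supported,
			   bool phy_adds_100m)
{
	if (phy_adds_100m)	/* e.g. the BCM84833 case in the hunk */
		supported |= SUP_100_HALF | SUP_100_FULL;
	return (advertising & ~supported) == 0;
}

int main(void)
{
	return advertising_ok(SUP_100_FULL, 0, true) ? 0 : 1;
}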
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 4df9505b67b..2091e5dbbcd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -2502,7 +2502,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 				      struct bnx2x_nig_brb_pfc_port_params *nig_params)
 {
 	u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
-	u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+	u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
 	u32 pkt_priority_to_cos = 0;
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -2516,9 +2516,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 	 * MAC control frames (that are not pause packets)
 	 * will be forwarded to the XCM.
 	 */
-	xcm_mask = REG_RD(bp,
-			  port ? NIG_REG_LLH1_XCM_MASK :
-			  NIG_REG_LLH0_XCM_MASK);
+	xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+			  NIG_REG_LLH0_XCM_MASK);
 	/*
 	 * nig params will override non PFC params, since it's possible to
 	 * do transition from PFC to SAFC
@@ -2533,8 +2532,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 		ppp_enable = 1;
 		xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
 			      NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-		xcm0_out_en = 0;
-		p0_hwpfc_enable = 1;
+		xcm_out_en = 0;
+		hwpfc_enable = 1;
 	} else {
 		if (nig_params) {
 			llfc_out_en = nig_params->llfc_out_en;
@@ -2545,7 +2544,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 
 		xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
 			     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-		xcm0_out_en = 1;
+		xcm_out_en = 1;
 	}
 
 	if (CHIP_IS_E3(bp))
@@ -2564,13 +2563,16 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 	REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
 	       NIG_REG_LLH0_XCM_MASK, xcm_mask);
 
-	REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+	REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+	       NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
 
 	/* output enable for RX_XCM # IF */
-	REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+	REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
+	       NIG_REG_XCM0_OUT_EN, xcm_out_en);
 
 	/* HW PFC TX enable */
-	REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+	REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
+	       NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
 
 	if (nig_params) {
 		u8 i = 0;
@@ -3761,7 +3763,15 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	/* Advertise pause */
 	bnx2x_ext_phy_set_pause(params, phy, vars);
 
-	vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+	/*
+	 * Set KR Autoneg Work-Around flag for Warpcore version older than D108
+	 */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
+	if (val16 < 0xd108) {
+		DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+		vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+	}
 
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 			MDIO_WC_REG_DIGITAL5_MISC7, &val16);
@@ -9266,62 +9276,68 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 /* BCM8481/BCM84823/BCM84833 PHY SECTION */
 /******************************************************************/
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
-					    struct link_params *params)
+					    struct bnx2x *bp,
+					    u8 port)
 {
 	u16 val, fw_ver1, fw_ver2, cnt;
-	u8 port;
-	struct bnx2x *bp = params->bp;
-
-	port = params->port;
-
-	/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
-	/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
-
-	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
-		if (val & 1)
-			break;
-		udelay(5);
-	}
-	if (cnt == 100) {
-		DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
-		bnx2x_save_spirom_version(bp, port, 0,
-					  phy->ver_addr);
-		return;
-	}
-
-	/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
-	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
-		if (val & 1)
-			break;
-		udelay(5);
-	}
-	if (cnt == 100) {
-		DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
-		bnx2x_save_spirom_version(bp, port, 0,
-					  phy->ver_addr);
-		return;
-	}
-
-	/* lower 16 bits of the register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
-	/* upper 16 bits of register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
-
-	bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
-				  phy->ver_addr);
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
+		bnx2x_save_spirom_version(bp, port,
+				((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+				phy->ver_addr);
+	} else {
+		/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+		/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+		for (cnt = 0; cnt < 100; cnt++) {
+			bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+			if (val & 1)
+				break;
+			udelay(5);
+		}
+		if (cnt == 100) {
+			DP(NETIF_MSG_LINK, "Unable to read 848xx "
+					"phy fw version(1)\n");
+			bnx2x_save_spirom_version(bp, port, 0,
+						  phy->ver_addr);
+			return;
+		}
+
+		/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+		for (cnt = 0; cnt < 100; cnt++) {
+			bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+			if (val & 1)
+				break;
+			udelay(5);
+		}
+		if (cnt == 100) {
+			DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw "
+					"version(2)\n");
+			bnx2x_save_spirom_version(bp, port, 0,
+						  phy->ver_addr);
+			return;
+		}
+
+		/* lower 16 bits of the register SPI_FW_STATUS */
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+		/* upper 16 bits of register SPI_FW_STATUS */
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+		bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
+				phy->ver_addr);
+	}
 }
 
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
 				struct bnx2x_phy *phy)
 {
@@ -9392,10 +9408,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 	u16 tmp_req_line_speed;
 
 	tmp_req_line_speed = phy->req_line_speed;
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
 		if (phy->req_line_speed == SPEED_10000)
 			phy->req_line_speed = SPEED_AUTO_NEG;
-
+	} else {
+		/* Save spirom version */
+		bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+	}
 	/*
 	 * This phy uses the NIG latch mechanism since link indication
 	 * arrives through its LED4 and not via its LASI signal, so we
@@ -9443,13 +9462,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 			 an_1000_val);
 
 	/* set 100 speed advertisement */
-	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-	     (phy->speed_cap_mask &
-	      (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-	       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
-	     (phy->supported &
-	      (SUPPORTED_100baseT_Half |
-	       SUPPORTED_100baseT_Full)))) {
+	if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	    (phy->speed_cap_mask &
+	     (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
 		an_10_100_val |= (1<<7);
 		/* Enable autoneg and restart autoneg for legacy speeds */
 		autoneg_val |= (1<<9 | 1<<12);
@@ -9539,9 +9555,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 			 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
 			 1);
 
-	/* Save spirom version */
-	bnx2x_save_848xx_spirom_version(phy, params);
-
 	phy->req_line_speed = tmp_req_line_speed;
 
 	return 0;
@@ -9749,17 +9762,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-		/* Bring PHY out of super isolate mode */
-		bnx2x_cl45_read(bp, phy,
-				MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
-		val &= ~MDIO_84833_SUPER_ISOLATE;
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_CTL_DEVAD,
-				 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-		bnx2x_84833_pair_swap_cfg(phy, params, vars);
-	} else {
+	if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
 		/*
 		 * BCM84823 requires that XGXS links up first @ 10G for normal
 		 * behavior.
@@ -9816,24 +9819,23 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 	   params->multi_phy_config, val);
 
-	/* AutogrEEEn */
-	if (params->feature_config_flags &
-		FEATURE_CONFIG_AUTOGREEEN_ENABLED)
-		cmd_args[0] = 0x2;
-	else
-		cmd_args[0] = 0x0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+		bnx2x_84833_pair_swap_cfg(phy, params, vars);
 
-	cmd_args[1] = 0x0;
-	cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
-	cmd_args[3] = PHY84833_CONSTANT_LATENCY;
-	rc = bnx2x_84833_cmd_hdlr(phy, params,
-		PHY84833_CMD_SET_EEE_MODE, cmd_args);
-	if (rc != 0)
-		DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+		/* Keep AutogrEEEn disabled. */
+		cmd_args[0] = 0x0;
+		cmd_args[1] = 0x0;
+		cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+		cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+		rc = bnx2x_84833_cmd_hdlr(phy, params,
+			PHY84833_CMD_SET_EEE_MODE, cmd_args);
+		if (rc != 0)
+			DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+	}
 	if (initialize)
 		rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
 	else
-		bnx2x_save_848xx_spirom_version(phy, params);
+		bnx2x_save_848xx_spirom_version(phy, bp, params->port);
 	/* 84833 PHY has a better feature and doesn't need to support this. */
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
 		cms_enable = REG_RD(bp, params->shmem_base +
@@ -9851,6 +9853,16 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 				 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
 	}
 
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+		/* Bring PHY out of super isolate mode as the final step. */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+		val &= ~MDIO_84833_SUPER_ISOLATE;
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_CTL_DEVAD,
+				 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+	}
 	return rc;
 }
 
9856 9868
@@ -9988,10 +10000,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
9988 } else { 10000 } else {
9989 bnx2x_cl45_read(bp, phy, 10001 bnx2x_cl45_read(bp, phy,
9990 MDIO_CTL_DEVAD, 10002 MDIO_CTL_DEVAD,
9991 0x400f, &val16); 10003 MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
10004 val16 |= MDIO_84833_SUPER_ISOLATE;
9992 bnx2x_cl45_write(bp, phy, 10005 bnx2x_cl45_write(bp, phy,
9993 MDIO_PMA_DEVAD, 10006 MDIO_CTL_DEVAD,
9994 MDIO_PMA_REG_CTRL, 0x800); 10007 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
9995 } 10008 }
9996} 10009}
9997 10010
@@ -11516,6 +11529,19 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
 	}
 	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+	    (phy->ver_addr)) {
+		/*
+		 * Remove 100Mb link supported for BCM84833 when phy fw
+		 * version lower than or equal to 1.39
+		 */
+		u32 raw_ver = REG_RD(bp, phy->ver_addr);
+		if (((raw_ver & 0x7F) <= 39) &&
+		    (((raw_ver & 0xF80) >> 7) <= 1))
+			phy->supported &= ~(SUPPORTED_100baseT_Half |
+					    SUPPORTED_100baseT_Full);
+	}
+
 	/*
 	 * In case mdc/mdio_access of the external phy is different than the
 	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
@@ -12333,55 +12359,69 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
 					u32 chip_id)
 {
 	u8 reset_gpios;
-	struct bnx2x_phy phy;
-	u32 shmem_base, shmem2_base, cnt;
-	s8 port = 0;
-	u16 val;
-
 	reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
 	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
 	udelay(10);
 	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
 	DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
 	   reset_gpios);
-	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		/* This PHY is for E2 and E3. */
-		shmem_base = shmem_base_path[port];
-		shmem2_base = shmem2_base_path[port];
-		/* Extract the ext phy address for the port */
-		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
-				       0, &phy) !=
-		    0) {
-			DP(NETIF_MSG_LINK, "populate_phy failed\n");
-			return -EINVAL;
-		}
+	return 0;
+}
 
-		/* Wait for FW completing its initialization. */
-		for (cnt = 0; cnt < 1000; cnt++) {
-			bnx2x_cl45_read(bp, &phy,
+static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
+				    struct bnx2x_phy *phy)
+{
+	u16 val, cnt;
+	/* Wait for FW completing its initialization. */
+	for (cnt = 0; cnt < 1500; cnt++) {
+		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_CTRL, &val);
-			if (!(val & (1<<15)))
-				break;
-			msleep(1);
-		}
-		if (cnt >= 1000)
-			DP(NETIF_MSG_LINK,
-				"84833 Cmn reset timeout (%d)\n", port);
-
-		/* Put the port in super isolate mode. */
-		bnx2x_cl45_read(bp, &phy,
-				MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
-		val |= MDIO_84833_SUPER_ISOLATE;
-		bnx2x_cl45_write(bp, &phy,
-				 MDIO_CTL_DEVAD,
-				 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+		if (!(val & (1<<15)))
+			break;
+		msleep(1);
+	}
+	if (cnt >= 1500) {
+		DP(NETIF_MSG_LINK, "84833 reset timeout\n");
+		return -EINVAL;
 	}
 
+	/* Put the port in super isolate mode. */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+	val |= MDIO_84833_SUPER_ISOLATE;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_CTL_DEVAD,
+			 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+
+	/* Save spirom version */
+	bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
 	return 0;
 }
 
+int bnx2x_pre_init_phy(struct bnx2x *bp,
+		       u32 shmem_base,
+		       u32 shmem2_base,
+		       u32 chip_id)
+{
+	int rc = 0;
+	struct bnx2x_phy phy;
+	bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+	if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
+			       PORT_0, &phy)) {
+		DP(NETIF_MSG_LINK, "populate_phy failed\n");
+		return -EINVAL;
+	}
+	switch (phy.type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		rc = bnx2x_84833_pre_init_phy(bp, &phy);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
 
 static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
 				     u32 shmem2_base_path[], u8 phy_index,
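Taken together, the bnx2x hunks reshape the 84833 pre-init into a poll-with-timeout followed by forcing super-isolate mode, and unlike the old loop a timeout is now a hard failure. A runnable sketch of that polling shape (the register access and sleep are stubs standing in for bnx2x_cl45_read() on MDIO_PMA_REG_CTRL and msleep(); the fake-hardware state is purely illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static unsigned busy_polls = 3;	/* pretend HW: busy for 3 reads, then ready */
static uint16_t read_pma_ctrl(void)
{
	return busy_polls-- ? (uint16_t)(1u << 15) : 0;
}
static void sleep_ms(unsigned ms) { (void)ms; }

/* Poll bit 15 of the PMA control register with the widened 1500-try
 * budget; returning false lets the caller fail with -EINVAL instead of
 * merely logging a timeout and pressing on, as the old code did. */
static bool wait_fw_ready(void)
{
	for (unsigned cnt = 0; cnt < 1500; cnt++) {
		if (!(read_pma_ctrl() & (1u << 15)))
			return true;
		sleep_ms(1);
	}
	return false;
}

int main(void)
{
	printf("fw ready: %d\n", wait_fw_ready());
	return 0;
}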
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 44609de4e5d..dddbcf6e154 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2176,6 +2176,7 @@
  * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
  * accommodate the 9 input clients to ETS arbiter. */
 #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB		0x18684
+#define NIG_REG_P1_HWPFC_ENABLE				0x181d0
 #define NIG_REG_P1_MAC_IN_EN				0x185c0
 /* [RW 1] Output enable for TX MAC interface */
 #define NIG_REG_P1_MAC_OUT_EN				0x185c4
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 076e02a415a..d529af99157 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8846,9 +8846,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 	udelay(100);
 
-	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+	if (tg3_flag(tp, USING_MSIX)) {
 		val = tr32(MSGINT_MODE);
-		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+		val |= MSGINT_MODE_ENABLE;
+		if (tp->irq_cnt > 1)
+			val |= MSGINT_MODE_MULTIVEC_EN;
 		if (!tg3_flag(tp, 1SHOT_MSI))
 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
 		tw32(MSGINT_MODE, val);
@@ -9548,19 +9550,18 @@ static int tg3_request_firmware(struct tg3 *tp)
 
 static bool tg3_enable_msix(struct tg3 *tp)
 {
-	int i, rc, cpus = num_online_cpus();
+	int i, rc;
 	struct msix_entry msix_ent[tp->irq_max];
 
-	if (cpus == 1)
-		/* Just fallback to the simpler MSI mode. */
-		return false;
-
-	/*
-	 * We want as many rx rings enabled as there are cpus.
-	 * The first MSIX vector only deals with link interrupts, etc,
-	 * so we add one to the number of vectors we are requesting.
-	 */
-	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+	tp->irq_cnt = num_online_cpus();
+	if (tp->irq_cnt > 1) {
+		/* We want as many rx rings enabled as there are cpus.
+		 * In multiqueue MSI-X mode, the first MSI-X vector
+		 * only deals with link interrupts, etc, so we add
+		 * one to the number of vectors we are requesting.
+		 */
+		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+	}
 
 	for (i = 0; i < tp->irq_max; i++) {
 		msix_ent[i].entry = i;
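The tg3 change lets MSI-X proceed even on a single-CPU machine, with one vector and no multivector mode, instead of bailing out to plain MSI. The sizing rule extracted into a sketch (min_t replaced by a plain helper):

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

/* One extra vector for link/misc interrupts when multiqueue is possible;
 * a single CPU still gets one MSI-X vector, matching the rewritten
 * tg3_enable_msix() above. */
static unsigned msix_vectors(unsigned online_cpus, unsigned irq_max)
{
	unsigned n = online_cpus;
	if (n > 1)
		n = min_u(n + 1, irq_max);
	return n;
}

int main(void)
{
	printf("%u %u %u\n", msix_vectors(1, 5), msix_vectors(4, 5),
	       msix_vectors(8, 5));	/* prints: 1 5 5 */
	return 0;
}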
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 05b7359bde8..6bdd8e36e56 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -263,7 +263,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
 		data[i++] = atomic_read(&port->port_res[k].swqe_avail);
 }
 
-const struct ethtool_ops ehea_ethtool_ops = {
+static const struct ethtool_ops ehea_ethtool_ops = {
 	.get_settings = ehea_get_settings,
 	.get_drvinfo = ehea_get_drvinfo,
 	.get_msglevel = ehea_get_msglevel,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3554414eb5e..5d5fb262718 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -94,8 +94,8 @@ static int port_name_cnt;
94static LIST_HEAD(adapter_list); 94static LIST_HEAD(adapter_list);
95static unsigned long ehea_driver_flags; 95static unsigned long ehea_driver_flags;
96static DEFINE_MUTEX(dlpar_mem_lock); 96static DEFINE_MUTEX(dlpar_mem_lock);
97struct ehea_fw_handle_array ehea_fw_handles; 97static struct ehea_fw_handle_array ehea_fw_handles;
98struct ehea_bcmc_reg_array ehea_bcmc_regs; 98static struct ehea_bcmc_reg_array ehea_bcmc_regs;
99 99
100 100
101static int __devinit ehea_probe_adapter(struct platform_device *dev, 101static int __devinit ehea_probe_adapter(struct platform_device *dev,
@@ -133,7 +133,7 @@ void ehea_dump(void *adr, int len, char *msg)
133 } 133 }
134} 134}
135 135
136void ehea_schedule_port_reset(struct ehea_port *port) 136static void ehea_schedule_port_reset(struct ehea_port *port)
137{ 137{
138 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags)) 138 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
139 schedule_work(&port->reset_task); 139 schedule_work(&port->reset_task);
@@ -1404,7 +1404,7 @@ out:
1404 return ret; 1404 return ret;
1405} 1405}
1406 1406
1407int ehea_gen_smrs(struct ehea_port_res *pr) 1407static int ehea_gen_smrs(struct ehea_port_res *pr)
1408{ 1408{
1409 int ret; 1409 int ret;
1410 struct ehea_adapter *adapter = pr->port->adapter; 1410 struct ehea_adapter *adapter = pr->port->adapter;
@@ -1426,7 +1426,7 @@ out:
1426 return -EIO; 1426 return -EIO;
1427} 1427}
1428 1428
1429int ehea_rem_smrs(struct ehea_port_res *pr) 1429static int ehea_rem_smrs(struct ehea_port_res *pr)
1430{ 1430{
1431 if ((ehea_rem_mr(&pr->send_mr)) || 1431 if ((ehea_rem_mr(&pr->send_mr)) ||
1432 (ehea_rem_mr(&pr->recv_mr))) 1432 (ehea_rem_mr(&pr->recv_mr)))
@@ -2190,7 +2190,7 @@ out:
2190 return err; 2190 return err;
2191} 2191}
2192 2192
2193int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) 2193static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2194{ 2194{
2195 int ret = -EIO; 2195 int ret = -EIO;
2196 u64 hret; 2196 u64 hret;
@@ -2531,7 +2531,7 @@ static void ehea_flush_sq(struct ehea_port *port)
2531 } 2531 }
2532} 2532}
2533 2533
2534int ehea_stop_qps(struct net_device *dev) 2534static int ehea_stop_qps(struct net_device *dev)
2535{ 2535{
2536 struct ehea_port *port = netdev_priv(dev); 2536 struct ehea_port *port = netdev_priv(dev);
2537 struct ehea_adapter *adapter = port->adapter; 2537 struct ehea_adapter *adapter = port->adapter;
@@ -2600,7 +2600,7 @@ out:
 	return ret;
 }
 
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
 {
 	struct ehea_qp qp = *orig_qp;
 	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2633,7 +2633,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
 	}
 }
 
-int ehea_restart_qps(struct net_device *dev)
+static int ehea_restart_qps(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
 	struct ehea_adapter *adapter = port->adapter;
@@ -2824,7 +2824,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
 	ehea_schedule_port_reset(port);
 }
 
-int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
 {
 	struct hcp_query_ehea *cb;
 	u64 hret;
@@ -2852,7 +2852,7 @@ out:
 	return ret;
 }
 
-int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
 {
 	struct hcp_ehea_port_cb4 *cb4;
 	u64 hret;
@@ -2966,7 +2966,7 @@ static const struct net_device_ops ehea_netdev_ops = {
 	.ndo_tx_timeout = ehea_tx_watchdog,
 };
 
-struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 					 u32 logical_port_id,
 					 struct device_node *dn)
 {
@@ -3237,7 +3237,7 @@ static ssize_t ehea_remove_port(struct device *dev,
 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
 
-int ehea_create_device_sysfs(struct platform_device *dev)
+static int ehea_create_device_sysfs(struct platform_device *dev)
 {
 	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
 	if (ret)
@@ -3248,7 +3248,7 @@ out:
 	return ret;
 }
 
-void ehea_remove_device_sysfs(struct platform_device *dev)
+static void ehea_remove_device_sysfs(struct platform_device *dev)
 {
 	device_remove_file(&dev->dev, &dev_attr_probe_port);
 	device_remove_file(&dev->dev, &dev_attr_remove_port);
@@ -3379,7 +3379,7 @@ static int __devexit ehea_remove(struct platform_device *dev)
 	return 0;
 }
 
-void ehea_crash_handler(void)
+static void ehea_crash_handler(void)
 {
 	int i;
 
@@ -3491,7 +3491,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv,
 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
 		   ehea_show_capabilities, NULL);
 
-int __init ehea_module_init(void)
+static int __init ehea_module_init(void)
 {
 	int ret;
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 95b9f4fa811..c25b05b94da 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -34,9 +34,7 @@
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
-struct ehea_bmap *ehea_bmap = NULL;
-
-
+static struct ehea_bmap *ehea_bmap;
 
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
 {
@@ -212,7 +210,7 @@ out_nomem:
 	return NULL;
 }
 
-u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
+static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
 {
 	u64 hret;
 	u64 adapter_handle = cq->adapter->handle;
@@ -337,7 +335,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
 	return eqe;
 }
 
-u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
+static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
 {
 	u64 hret;
 	unsigned long flags;
@@ -381,7 +379,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 /**
  * allocates memory for a queue and registers pages in phyp
  */
-int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
 			   int nr_pages, int wqe_size, int act_nr_sges,
 			   struct ehea_adapter *adapter, int h_call_q_selector)
 {
@@ -516,7 +514,7 @@ out_freemem:
 	return NULL;
 }
 
-u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
+static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 {
 	u64 hret;
 	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
@@ -976,7 +974,7 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
 	return 0;
 }
 
-void print_error_data(u64 *data)
+static void print_error_data(u64 *data)
 {
 	int length;
 	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 75ec87a822b..0a85690a132 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
 	sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
 
 	ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
-		&ctl->sg, 1, DMA_TO_DEVICE,
+		&ctl->sg, 1, DMA_MEM_TO_DEV,
 		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
 	if (!ctl->adesc)
 		return NETDEV_TX_BUSY;
@@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
 	sg_dma_len(sg) = DMA_BUFFER_SIZE;
 
 	ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
-		sg, 1, DMA_FROM_DEVICE,
+		sg, 1, DMA_DEV_TO_MEM,
 		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
 
 	if (!ctl->adesc)
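
These two ks8842 hunks track the dmaengine API change in which slave transfers stopped reusing enum dma_data_direction (DMA_TO_DEVICE/DMA_FROM_DEVICE, which describes buffer mapping) and got their own enum dma_transfer_direction (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM). A standalone sketch of the split; the enum values here mirror the kernel's at the time, but the translation helper is illustrative only:

#include <stdio.h>

enum dma_data_direction     { DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };
enum dma_transfer_direction { DMA_MEM_TO_DEV = 1, DMA_DEV_TO_MEM = 2 };

/* map a buffer-mapping direction onto a slave-transfer direction */
static enum dma_transfer_direction
xfer_dir_from_mapping(enum dma_data_direction dir)
{
	return dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
}

int main(void)
{
	printf("TO_DEVICE -> %d, FROM_DEVICE -> %d\n",
	       xfer_dir_from_mapping(DMA_TO_DEVICE),
	       xfer_dir_from_mapping(DMA_FROM_DEVICE));
	return 0;
}
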
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6ece4295d78..813d41c4a84 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1703,7 +1703,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
 	mdp->mii_bus->name = "sh_mii";
 	mdp->mii_bus->parent = &ndev->dev;
 	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-		mdp->pdev->name, pdid);
+		mdp->pdev->name, id);
 
 	/* PHY IRQ */
 	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 88c81c5706b..09b8c9dbf78 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -557,10 +557,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
 			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
 		else if (rxsp->status11 & AR_MichaelErr)
 			rxs->rs_status |= ATH9K_RXERR_MIC;
-		if (rxsp->status11 & AR_KeyMiss)
-			rxs->rs_status |= ATH9K_RXERR_KEYMISS;
 	}
 
+	if (rxsp->status11 & AR_KeyMiss)
+		rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+
 	return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
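
This hunk (and the matching one in mac.c below) moves the AR_KeyMiss test out of the error-only branch: previously a frame whose only anomaly was a key-search miss never got ATH9K_RXERR_KEYMISS set, because the test was reached only when some other error bit had already fired. A standalone sketch of the control-flow fix, with simplified stand-ins for the driver's status words:

#include <stdio.h>

#define ERR_CRC     0x1
#define ERR_KEYMISS 0x2
#define HW_CRC_ERR  0x1
#define HW_KEY_MISS 0x4

static unsigned int classify(unsigned int hw_status, int frame_ok)
{
	unsigned int rs_status = 0;

	if (!frame_ok) {		/* error branch */
		if (hw_status & HW_CRC_ERR)
			rs_status |= ERR_CRC;
	}

	/* checked unconditionally after the fix */
	if (hw_status & HW_KEY_MISS)
		rs_status |= ERR_KEYMISS;

	return rs_status;
}

int main(void)
{
	/* frame is otherwise fine, but the key search missed */
	printf("status=%#x\n", classify(HW_KEY_MISS, 1));
	return 0;
}
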
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index fd3f19c2e55..e196aba77ac 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -618,10 +618,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
 			rs->rs_status |= ATH9K_RXERR_DECRYPT;
 		else if (ads.ds_rxstatus8 & AR_MichaelErr)
 			rs->rs_status |= ATH9K_RXERR_MIC;
-		if (ads.ds_rxstatus8 & AR_KeyMiss)
-			rs->rs_status |= ATH9K_RXERR_KEYMISS;
 	}
 
+	if (ads.ds_rxstatus8 & AR_KeyMiss)
+		rs->rs_status |= ATH9K_RXERR_KEYMISS;
+
 	return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 1c6f19393ef..b91f28ef103 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4852,6 +4852,9 @@ static void b43_op_stop(struct ieee80211_hw *hw)
 
 	cancel_work_sync(&(wl->beacon_update_trigger));
 
+	if (!dev)
+		goto out;
+
 	mutex_lock(&wl->mutex);
 	if (b43_status(dev) >= B43_STAT_STARTED) {
 		dev = b43_wireless_core_stop(dev);
@@ -4863,7 +4866,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
 
 out_unlock:
 	mutex_unlock(&wl->mutex);
-
+out:
 	cancel_work_sync(&(wl->txpower_adjust_work));
 }
 
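
The b43 change is the classic guard-clause shape: bail out before the locked section when the pointer that section would dereference may be NULL, and give the teardown that must always run its own label. A standalone sketch with illustrative names:

#include <stdio.h>
#include <stddef.h>

struct dev { int status; };

static void stop_device(struct dev *dev)
{
	if (!dev)
		goto out;	/* nothing to stop, skip locked section */

	/* ... lock, inspect dev->status, tear down ... */
	printf("stopped, status=%d\n", dev->status);

out:
	/* cleanup that is safe and required either way */
	printf("cancelled deferred work\n");
}

int main(void)
{
	stop_device(NULL);
	return 0;
}
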
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index f23b0c3e4ea..bf11850a20f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -2475,7 +2475,7 @@ static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
 	return err;
 }
 
-static void brcmf_delay(u32 ms)
+static __always_inline void brcmf_delay(u32 ms)
 {
 	if (ms < 1000 / HZ) {
 		cond_resched();
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index d106576ce33..448ab9c4eb4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1128,14 +1128,7 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
 	return 0;
 }
 
-static int brcms_pci_suspend(struct pci_dev *pdev)
-{
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-	return pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
+static int brcms_suspend(struct bcma_device *pdev)
 {
 	struct brcms_info *wl;
 	struct ieee80211_hw *hw;
@@ -1153,40 +1146,15 @@ static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
 	wl->pub->hw_up = false;
 	spin_unlock_bh(&wl->lock);
 
-	/* temporarily do suspend ourselves */
-	return brcms_pci_suspend(pdev->bus->host_pci);
-}
-
-static int brcms_pci_resume(struct pci_dev *pdev)
-{
-	int err = 0;
-	uint val;
-
-	err = pci_set_power_state(pdev, PCI_D0);
-	if (err)
-		return err;
-
-	pci_restore_state(pdev);
-
-	err = pci_enable_device(pdev);
-	if (err)
-		return err;
-
-	pci_set_master(pdev);
-
-	pci_read_config_dword(pdev, 0x40, &val);
-	if ((val & 0x0000ff00) != 0)
-		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+	pr_debug("brcms_suspend ok\n");
 
 	return 0;
 }
 
 static int brcms_resume(struct bcma_device *pdev)
 {
-	/*
-	 * just do pci resume for now until bcma supports it.
-	 */
-	return brcms_pci_resume(pdev->bus->host_pci);
+	pr_debug("brcms_resume ok\n");
+	return 0;
 }
 
 static struct bcma_driver brcms_bcma_driver = {
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 018a8deb88a..4fcdac63a30 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -7848,7 +7848,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
 	 * more efficiently than we can parse it. ORDER MATTERS HERE */
 	struct ipw_rt_hdr *ipw_rt;
 
-	short len = le16_to_cpu(pkt->u.frame.length);
+	unsigned short len = le16_to_cpu(pkt->u.frame.length);
 
 	/* We received data from the HW, so stop the watchdog */
 	dev->trans_start = jiffies;
@@ -8023,7 +8023,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
 	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
 	s8 noise = (s8) le16_to_cpu(frame->noise);
 	u8 rate = frame->rate;
-	short len = le16_to_cpu(pkt->u.frame.length);
+	unsigned short len = le16_to_cpu(pkt->u.frame.length);
 	struct sk_buff *skb;
 	int hdr_only = 0;
 	u16 filter = priv->prom_priv->filter;
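
Both ipw2200 hunks fix the same sign bug: le16_to_cpu() yields an unsigned 16-bit value (0-65535), so storing it in a plain short makes lengths at or above 0x8000 negative and flips later size comparisons. A standalone sketch of the failure mode:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t wire_len = 0x9000;	/* 36864 bytes on the wire */

	/* conversion to short is implementation-defined; typically -28672 */
	short          bad  = wire_len;
	unsigned short good = wire_len;

	printf("bad=%d good=%u\n", bad, good);
	printf("bad < 64 ? %s\n", bad < 64 ? "yes (bogus)" : "no");
	return 0;
}
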
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 084aa2c4ccf..a6454726737 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -569,7 +569,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	struct iwl_scan_cmd *scan;
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	u32 rate_flags = 0;
-	u16 cmd_len;
+	u16 cmd_len = 0;
 	u16 rx_chain = 0;
 	enum ieee80211_band band;
 	u8 n_probes = 0;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 7becea3dec6..dd5aeaff44b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2777,7 +2777,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
 	else if (channel->band == IEEE80211_BAND_5GHZ)
 		cmd->band = cpu_to_le16(0x4);
 
-	cmd->channel = channel->hw_value;
+	cmd->channel = cpu_to_le16(channel->hw_value);
 
 	if (conf->channel_type == NL80211_CHAN_NO_HT ||
 	    conf->channel_type == NL80211_CHAN_HT20) {
@@ -4066,7 +4066,7 @@ static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
 		goto done;
 
 	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-			WLAN_CIPHER_SUITE_WEP104)
+			key->cipher == WLAN_CIPHER_SUITE_WEP104)
 		mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
 
 	cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
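
The two mwl8k hunks fix distinct bugs: a missing cpu_to_le16() on a little-endian command field, and a condition whose right-hand side was a bare nonzero constant, which in C is just a truth test, so the check was always true and the WEP bookkeeping ran for every cipher. A standalone sketch of the second bug; the constants mirror the WLAN_CIPHER_SUITE_* values only in spirit:

#include <stdio.h>

#define SUITE_WEP40  0x01
#define SUITE_WEP104 0x05
#define SUITE_CCMP   0x04

int main(void)
{
	unsigned int cipher = SUITE_CCMP;

	if (cipher == SUITE_WEP40 || SUITE_WEP104)	/* buggy: always true */
		printf("buggy check fires for CCMP too\n");

	if (cipher == SUITE_WEP40 || cipher == SUITE_WEP104)
		printf("fixed check fires\n");
	else
		printf("fixed check correctly skips CCMP\n");

	return 0;
}
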
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 4941a1a2321..dc88baefa72 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
 static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
 				 enum dev_state state)
 {
-	int mask = (state == STATE_RADIO_IRQ_ON);
 	u32 reg;
 	unsigned long flags;
 
@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
 	}
 
 	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
-	rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
-	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
+	reg = 0;
+	if (state == STATE_RADIO_IRQ_ON) {
+		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+	}
 	rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
 	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
 
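
The rt2800pci rewrite replaces a read-modify-write that forced a long list of fields to zero with a mask built from scratch: start at 0 (everything masked) and enable only the interrupt sources that should be live when IRQs are turned on. A standalone sketch; the bit positions are made up for illustration:

#include <stdio.h>

#define INT_RX_DONE	(1u << 0)
#define INT_TBTT	(1u << 8)
#define INT_PRE_TBTT	(1u << 9)

enum dev_state { STATE_RADIO_IRQ_OFF, STATE_RADIO_IRQ_ON };

static unsigned int build_irq_mask(enum dev_state state)
{
	unsigned int reg = 0;	/* everything masked by default */

	if (state == STATE_RADIO_IRQ_ON)
		reg |= INT_RX_DONE | INT_TBTT | INT_PRE_TBTT;

	return reg;
}

int main(void)
{
	printf("on=%#x off=%#x\n",
	       build_irq_mask(STATE_RADIO_IRQ_ON),
	       build_irq_mask(STATE_RADIO_IRQ_OFF));
	return 0;
}
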
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 06ea3bcfdd2..16570aa84aa 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,16 +830,11 @@ config SCSI_ISCI
 	tristate "Intel(R) C600 Series Chipset SAS Controller"
 	depends on PCI && SCSI
 	depends on X86
-	# (temporary): known alpha quality driver
-	depends on EXPERIMENTAL
 	select SCSI_SAS_LIBSAS
-	select SCSI_SAS_HOST_SMP
 	---help---
 	  This driver supports the 6Gb/s SAS capabilities of the storage
 	  control unit found in the Intel(R) C600 series chipset.
 
-	  The experimental tag will be removed after the driver exits alpha
-
 config SCSI_GENERIC_NCR5380
 	tristate "Generic NCR5380/53c400 SCSI PIO support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 78963be2c4f..cb07c628b2f 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s {
 	u32	tm_iocdowns;		/* TM cleaned-up due to IOC down */
 	u32	tm_cleanups;		/* TM cleanup requests	*/
 	u32	tm_cleanup_comps;	/* TM cleanup completions */
-	u32	lm_lun_across_sg;	/* LM lun is across sg data buf */
-	u32	lm_lun_not_sup;		/* LM lun not supported */
-	u32	lm_rpl_data_changed;	/* LM report-lun data changed */
-	u32	lm_wire_residue_changed; /* LM report-lun rsp residue changed */
-	u32	lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
-	u32	lm_lun_not_rdy;		/* LM lun not ready */
+	u32	rsvd[6];
 };
 
 /* Modify char* port_stt[] in bfal_port.c if a new state was added */
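
Note the shape of this removal: six u32 counters collapse into rsvd[6] rather than vanishing, which keeps the structure's size and the offsets of any fields behind it unchanged — the usual move when a struct is part of a wire or driver/firmware layout. A standalone sketch of the invariant, with hypothetical struct names:

#include <stdint.h>

struct iostats_old {
	uint32_t tm_cleanup_comps;
	uint32_t lm_counters[6];	/* six lunmask counters */
};

struct iostats_new {
	uint32_t tm_cleanup_comps;
	uint32_t rsvd[6];		/* same footprint, unused */
};

_Static_assert(sizeof(struct iostats_old) == sizeof(struct iostats_new),
	       "layout must not change");

int main(void) { return 0; }
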
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 50b6a1c8619..8d0b88f67a3 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
 
 #define SCSI_MAX_ALLOC_LEN	0xFF	/* maximum allocarion length */
 
-#define SCSI_SENSE_CUR_ERR	0x70
-#define SCSI_SENSE_DEF_ERR	0x71
-
-/*
- * SCSI additional sense codes
- */
-#define SCSI_ASC_LUN_NOT_READY		0x04
-#define SCSI_ASC_LUN_NOT_SUPPORTED	0x25
-#define SCSI_ASC_TOCC			0x3F
-
-/*
- * SCSI additional sense code qualifiers
- */
-#define SCSI_ASCQ_MAN_INTR_REQ		0x03	/* manual intervention req */
-#define SCSI_ASCQ_RL_DATA_CHANGED	0x0E	/* report luns data changed */
-
-/*
- * Methods of reporting informational exceptions
- */
-#define SCSI_MP_IEC_UNIT_ATTN	0x2	/* generate unit attention */
-
-struct scsi_report_luns_data_s {
-	u32		lun_list_length;	/* length of LUN list length */
-	u32		reserved;
-	struct scsi_lun	lun[1];			/* first LUN in lun list */
-};
-
-struct scsi_inquiry_vendor_s {
-	u8	vendor_id[8];
-};
-
-struct scsi_inquiry_prodid_s {
-	u8	product_id[16];
-};
-
-struct scsi_inquiry_prodrev_s {
-	u8	product_rev[4];
-};
-
-struct scsi_inquiry_data_s {
-#ifdef __BIG_ENDIAN
-	u8	peripheral_qual:3;	/* peripheral qualifier */
-	u8	device_type:5;		/* peripheral device type */
-	u8	rmb:1;			/* removable medium bit */
-	u8	device_type_mod:7;	/* device type modifier */
-	u8	version;
-	u8	aenc:1;		/* async evt notification capability */
-	u8	trm_iop:1;	/* terminate I/O process */
-	u8	norm_aca:1;	/* normal ACA supported */
-	u8	hi_support:1;	/* SCSI-3: supports REPORT LUNS */
-	u8	rsp_data_format:4;
-	u8	additional_len;
-	u8	sccs:1;
-	u8	reserved1:7;
-	u8	reserved2:1;
-	u8	enc_serv:1;	/* enclosure service component */
-	u8	reserved3:1;
-	u8	multi_port:1;	/* multi-port device */
-	u8	m_chngr:1;	/* device in medium transport element */
-	u8	ack_req_q:1;	/* SIP specific bit */
-	u8	addr32:1;	/* SIP specific bit */
-	u8	addr16:1;	/* SIP specific bit */
-	u8	rel_adr:1;	/* relative address */
-	u8	w_bus32:1;
-	u8	w_bus16:1;
-	u8	synchronous:1;
-	u8	linked_commands:1;
-	u8	trans_dis:1;
-	u8	cmd_queue:1;	/* command queueing supported */
-	u8	soft_reset:1;	/* soft reset alternative (VS) */
-#else
-	u8	device_type:5;	/* peripheral device type */
-	u8	peripheral_qual:3;	/* peripheral qualifier */
-	u8	device_type_mod:7;	/* device type modifier */
-	u8	rmb:1;		/* removable medium bit */
-	u8	version;
-	u8	rsp_data_format:4;
-	u8	hi_support:1;	/* SCSI-3: supports REPORT LUNS */
-	u8	norm_aca:1;	/* normal ACA supported */
-	u8	terminate_iop:1;/* terminate I/O process */
-	u8	aenc:1;		/* async evt notification capability */
-	u8	additional_len;
-	u8	reserved1:7;
-	u8	sccs:1;
-	u8	addr16:1;	/* SIP specific bit */
-	u8	addr32:1;	/* SIP specific bit */
-	u8	ack_req_q:1;	/* SIP specific bit */
-	u8	m_chngr:1;	/* device in medium transport element */
-	u8	multi_port:1;	/* multi-port device */
-	u8	reserved3:1;	/* TBD - Vendor Specific */
-	u8	enc_serv:1;	/* enclosure service component */
-	u8	reserved2:1;
-	u8	soft_seset:1;	/* soft reset alternative (VS) */
-	u8	cmd_queue:1;	/* command queueing supported */
-	u8	trans_dis:1;
-	u8	linked_commands:1;
-	u8	synchronous:1;
-	u8	w_bus16:1;
-	u8	w_bus32:1;
-	u8	rel_adr:1;	/* relative address */
-#endif
-	struct scsi_inquiry_vendor_s	vendor_id;
-	struct scsi_inquiry_prodid_s	product_id;
-	struct scsi_inquiry_prodrev_s	product_rev;
-	u8		vendor_specific[20];
-	u8		reserved4[40];
-};
-
-/*
- * SCSI sense data format
- */
-struct scsi_sense_s {
-#ifdef __BIG_ENDIAN
-	u8		valid:1;
-	u8		rsp_code:7;
-#else
-	u8		rsp_code:7;
-	u8		valid:1;
-#endif
-	u8		seg_num;
-#ifdef __BIG_ENDIAN
-	u8		file_mark:1;
-	u8		eom:1;		/* end of media */
-	u8		ili:1;		/* incorrect length indicator */
-	u8		reserved:1;
-	u8		sense_key:4;
-#else
-	u8		sense_key:4;
-	u8		reserved:1;
-	u8		ili:1;		/* incorrect length indicator */
-	u8		eom:1;		/* end of media */
-	u8		file_mark:1;
-#endif
-	u8		information[4];	/* device-type or cmd specific info */
-	u8		add_sense_length;	/* additional sense length */
-	u8		command_info[4];/* command specific information */
-	u8		asc;		/* additional sense code */
-	u8		ascq;		/* additional sense code qualifier */
-	u8		fru_code;	/* field replaceable unit code */
-#ifdef __BIG_ENDIAN
-	u8		sksv:1;		/* sense key specific valid */
-	u8		c_d:1;		/* command/data bit */
-	u8		res1:2;
-	u8		bpv:1;		/* bit pointer valid */
-	u8		bpointer:3;	/* bit pointer */
-#else
-	u8		bpointer:3;	/* bit pointer */
-	u8		bpv:1;		/* bit pointer valid */
-	u8		res1:2;
-	u8		c_d:1;		/* command/data bit */
-	u8		sksv:1;		/* sense key specific valid */
-#endif
-	u8		fpointer[2];	/* field pointer */
-};
-
 /*
  * Fibre Channel Header Structure (FCHS) definition
  */
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index e07bd4745d8..f0f80e282e3 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
  * BFA ITNIM Related definitions
  */
 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
-static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
 static void bfa_ioim_lm_init(struct bfa_s *bfa);
 
 #define BFA_ITNIM_FROM_TAG(_fcpim, _tag)	\
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
 	}						\
 } while (0)
 
-#define bfa_ioim_rp_wwn(__ioim)				\
-	(((struct bfa_fcs_rport_s *)			\
-	 (__ioim)->itnim->rport->rport_drv)->pwwn)
-
-#define bfa_ioim_lp_wwn(__ioim)				\
-	((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa),	\
-	(__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)	\
-
 #define bfa_itnim_sler_cb(__itnim) do {			\
 	if ((__itnim)->bfa->fcs)			\
 		bfa_cb_itnim_sler((__itnim)->ditn);	\
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
 	}						\
 } while (0)
 
-enum bfa_ioim_lm_status {
-	BFA_IOIM_LM_PRESENT = 1,
-	BFA_IOIM_LM_LUN_NOT_SUP = 2,
-	BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
-	BFA_IOIM_LM_LUN_NOT_RDY = 4,
-};
-
 enum bfa_ioim_lm_ua_status {
 	BFA_IOIM_LM_UA_RESET = 0,
 	BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
 	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
 	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
 	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
-	BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
-	BFA_IOIM_SM_LM_RPL_DC = 20,	/* lunmask report-lun data changed */
-	BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
 };
152 132
153 133
 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
 
 /*
  * forward declaration of BFA IO state machine
253 * forward declaration of BFA IO state machine 230 * forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
445 bfa_fcpim_add_iostats(lstats, rstats, output_reqs); 422 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
446 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); 423 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
447 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); 424 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
448 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
449 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
450 bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
451 bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
452 bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
453 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
454} 425}
455 426
456bfa_status_t 427bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1580 __bfa_cb_ioim_abort, ioim); 1551 __bfa_cb_ioim_abort, ioim);
1581 break; 1552 break;
1582 1553
1583 case BFA_IOIM_SM_LM_LUN_NOT_SUP:
1584 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1585 bfa_ioim_move_to_comp_q(ioim);
1586 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1587 __bfa_cb_ioim_lm_lun_not_sup, ioim);
1588 break;
1589
1590 case BFA_IOIM_SM_LM_RPL_DC:
1591 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1592 bfa_ioim_move_to_comp_q(ioim);
1593 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1594 __bfa_cb_ioim_lm_rpl_dc, ioim);
1595 break;
1596
1597 case BFA_IOIM_SM_LM_LUN_NOT_RDY:
1598 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1599 bfa_ioim_move_to_comp_q(ioim);
1600 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1601 __bfa_cb_ioim_lm_lun_not_rdy, ioim);
1602 break;
1603
1604 default: 1554 default:
1605 bfa_sm_fault(ioim->bfa, event); 1555 bfa_sm_fault(ioim->bfa, event);
1606 } 1556 }
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
 	}
 }
 
-/*
- * Validate LUN for LUN masking
- */
-static enum bfa_ioim_lm_status
-bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
-		struct bfa_rport_s *rp, struct scsi_lun lun)
-{
-	u8 i;
-	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
-
-	if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
-	    (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
-		ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-		return BFA_IOIM_LM_PRESENT;
-	}
-
-	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-
-		if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
-			continue;
-
-		if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
-		    scsilun_to_int((struct scsi_lun *)&lun))
-		    && (rp->rport_tag == lun_list[i].rp_tag)
-		    && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
-			lun_list[i].lp_tag)) {
-			bfa_trc(ioim->bfa, lun_list[i].rp_tag);
-			bfa_trc(ioim->bfa, lun_list[i].lp_tag);
-			bfa_trc(ioim->bfa, scsilun_to_int(
-				(struct scsi_lun *)&lun_list[i].lun));
-
-			if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
-			    ((cdb->scsi_cdb[0] != INQUIRY) ||
-			    (cdb->scsi_cdb[0] != REPORT_LUNS))) {
-				lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
-				return BFA_IOIM_LM_RPL_DATA_CHANGED;
-			}
-
-			if (cdb->scsi_cdb[0] == REPORT_LUNS)
-				ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-
-			return BFA_IOIM_LM_PRESENT;
-		}
-	}
-
-	if ((cdb->scsi_cdb[0] == INQUIRY) &&
-	    (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
-		ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
-		return BFA_IOIM_LM_PRESENT;
-	}
-
-	if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
-		return BFA_IOIM_LM_LUN_NOT_RDY;
-
-	return BFA_IOIM_LM_LUN_NOT_SUP;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
-{
-	return BFA_TRUE;
-}
-
-static void
-bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
-		int buf_lun_cnt)
-{
-	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-	struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
-	struct scsi_lun lun;
-	int i, j;
-
-	bfa_trc(ioim->bfa, buf_lun_cnt);
-	for (j = 0; j < buf_lun_cnt; j++) {
-		lun = *((struct scsi_lun *)(lun_data + j));
-		for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-			if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
-				continue;
-			if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
-			    (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
-			    (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
-				== scsilun_to_int((struct scsi_lun *)&lun))) {
-				lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
-				break;
-			}
-		} /* next lun in mask DB */
-	} /* next lun in buf */
-}
-
-static int
-bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
-		struct scsi_report_luns_data_s *rl)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct scatterlist *sg = scsi_sglist(cmnd);
-	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-	struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
-	int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
-	int lun_across_sg_bytes, bytes_from_next_buf;
-	u64 last_lun, temp_last_lun;
-
-	/* fetch luns from the first sg element */
-	bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
-			(sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
-
-	/* fetch luns from multiple sg elements */
-	scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
-		if (sgeid == 0) {
-			prev_sg_len = sg_dma_len(sg);
-			prev_rl_data = (struct scsi_lun *)
-					phys_to_virt(sg_dma_address(sg));
-			continue;
-		}
-
-		/* if the buf is having more data */
-		lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
-		if (lun_across_sg_bytes) {
-			bfa_trc(ioim->bfa, lun_across_sg_bytes);
-			bfa_stats(ioim->itnim, lm_lun_across_sg);
-			bytes_from_next_buf = sizeof(struct scsi_lun) -
-					      lun_across_sg_bytes;
-
-			/* from next buf take higher bytes */
-			temp_last_lun = *((u64 *)
-					  phys_to_virt(sg_dma_address(sg)));
-			last_lun |= temp_last_lun >>
-				    (lun_across_sg_bytes * BITS_PER_BYTE);
-
-			/* from prev buf take higher bytes */
-			temp_last_lun = *((u64 *)(prev_rl_data +
-					  (prev_sg_len - lun_across_sg_bytes)));
-			temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
-			last_lun = last_lun | (temp_last_lun <<
-				   (bytes_from_next_buf * BITS_PER_BYTE));
-
-			bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
-		} else
-			bytes_from_next_buf = 0;
-
-		*pgdlen += sg_dma_len(sg);
-		prev_sg_len = sg_dma_len(sg);
-		prev_rl_data = (struct scsi_lun *)
-				phys_to_virt(sg_dma_address(sg));
-		bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
-				bytes_from_next_buf,
-				sg_dma_len(sg) / sizeof(struct scsi_lun));
-	}
-
-	/* update the report luns data - based on fetched luns */
-	sg = scsi_sglist(cmnd);
-	base_rl_data = (struct scsi_lun *)rl->lun;
-	base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
-	for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
-		if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
-			base_rl_data[j] = lun_list[i].lun;
-			lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
-			j++;
-			lun_fetched_cnt++;
-		}
-
-		if (j > base_count) {
-			j = 0;
-			sg = sg_next(sg);
-			base_rl_data = (struct scsi_lun *)
-					phys_to_virt(sg_dma_address(sg));
-			base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
-		}
-	}
-
-	bfa_trc(ioim->bfa, lun_fetched_cnt);
-	return lun_fetched_cnt;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
-{
-	struct scsi_inquiry_data_s *inq;
-	struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
-
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
-	inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
-
-	bfa_trc(ioim->bfa, inq->device_type);
-	inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
-	return 0;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct scatterlist *sg = scsi_sglist(cmnd);
-	struct bfi_ioim_rsp_s *m;
-	struct scsi_report_luns_data_s *rl = NULL;
-	int lun_count = 0, lun_fetched_cnt = 0;
-	u32 residue, pgdlen = 0;
-
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
-	if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
-		return BFA_TRUE;
-
-	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
-	if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
-		return BFA_TRUE;
-
-	pgdlen = sg_dma_len(sg);
-	bfa_trc(ioim->bfa, pgdlen);
-	rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
-	lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
-	lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
-
-	if (lun_count == lun_fetched_cnt)
-		return BFA_TRUE;
-
-	bfa_trc(ioim->bfa, lun_count);
-	bfa_trc(ioim->bfa, lun_fetched_cnt);
-	bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-
-	if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
-		rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
-				      sizeof(struct scsi_lun);
-	else
-		bfa_stats(ioim->itnim, lm_small_buf_addresidue);
-
-	bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-	bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
-
-	residue = be32_to_cpu(m->residue);
-	residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
-	bfa_stats(ioim->itnim, lm_wire_residue_changed);
-	m->residue = be32_to_cpu(residue);
-	bfa_trc(ioim->bfa, ioim->nsges);
-	return BFA_FALSE;
-}
-
 static void
 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
 {
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
 			  m->scsi_status, sns_len, snsinfo, residue);
 }
 
-static void
-__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-	int sns_len = 0xD;
-	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-	struct scsi_sense_s *snsinfo;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
-					ioim->fcpim->fcp, ioim->iotag);
-	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-	snsinfo->add_sense_length = 0xa;
-	snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
-	snsinfo->sense_key = ILLEGAL_REQUEST;
-	bfa_trc(ioim->bfa, residue);
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-			SCSI_STATUS_CHECK_CONDITION, sns_len,
-			(u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-	int sns_len = 0xD;
-	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-	struct scsi_sense_s *snsinfo;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
-							      ioim->iotag);
-	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-	snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
-	snsinfo->asc = SCSI_ASC_TOCC;
-	snsinfo->add_sense_length = 0x6;
-	snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
-	bfa_trc(ioim->bfa, residue);
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-			SCSI_STATUS_CHECK_CONDITION, sns_len,
-			(u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-	int sns_len = 0xD;
-	u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-	struct scsi_sense_s *snsinfo;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
-					ioim->fcpim->fcp, ioim->iotag);
-	snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-	snsinfo->add_sense_length = 0xa;
-	snsinfo->sense_key = NOT_READY;
-	snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
-	snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
-	bfa_trc(ioim->bfa, residue);
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-			SCSI_STATUS_CHECK_CONDITION, sns_len,
-			(u8 *)snsinfo, residue);
-}
-
 void
 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
 			u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
 	if (port) {
 		*pwwn = port->port_cfg.pwwn;
 		rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-		rp = rp_fcs->bfa_rport;
+		if (rp_fcs)
+			rp = rp_fcs->bfa_rport;
 	}
 
 	lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
 		if (port) {
 			*pwwn = port->port_cfg.pwwn;
 			rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-			rp = rp_fcs->bfa_rport;
+			if (rp_fcs)
+				rp = rp_fcs->bfa_rport;
 		}
 	}
 
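
Both lunmask hunks above guard the same defect: a lookup that can return NULL was dereferenced unconditionally. With the guard, rp simply keeps its initialized value when the rport is not found. A standalone sketch with hypothetical names:

#include <stdio.h>
#include <stddef.h>

struct rport { int tag; };

static struct rport *lookup_rport(int key)
{
	(void)key;
	return NULL;	/* stand-in for a miss in the rport table */
}

int main(void)
{
	struct rport *found = lookup_rport(42);
	struct rport *rp = NULL;

	if (found)		/* the added guard */
		rp = found;

	printf("rp=%p\n", (void *)rp);
	return 0;
}
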
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
 		return;
 	}
 
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
 			0, 0, NULL, 0);
 }
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
 		return;
 	}
 
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
 			0, 0, NULL, 0);
 }
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
 		return;
 	}
 
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
 }
 
@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
 		ioim->bfa     = fcpim->bfa;
 		ioim->fcpim   = fcpim;
 		ioim->iosp    = iosp;
-		ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 		INIT_LIST_HEAD(&ioim->sgpg_q);
 		bfa_reqq_winit(&ioim->iosp->reqq_wait,
 				bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 			evt = BFA_IOIM_SM_DONE;
 		else
 			evt = BFA_IOIM_SM_COMP;
-		ioim->proc_rsp_data(ioim);
 		break;
 
 	case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 		if (rsp->abort_tag != ioim->abort_tag) {
 			bfa_trc(ioim->bfa, rsp->abort_tag);
 			bfa_trc(ioim->bfa, ioim->abort_tag);
-			ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 			return;
 		}
 
@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 		WARN_ON(1);
 	}
 
-	ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
 	bfa_sm_send_event(ioim, evt);
 }
 
@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 
 	bfa_ioim_cb_profile_comp(fcpim, ioim);
 
-	if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-		return;
-	}
-
-	if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-	else
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
 }
 
 /*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct bfa_lps_s *lps;
-	enum bfa_ioim_lm_status status;
-	struct scsi_lun scsilun;
-
-	if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
-		lps = BFA_IOIM_TO_LPS(ioim);
-		int_to_scsilun(cmnd->device->lun, &scsilun);
-		status = bfa_ioim_lm_check(ioim, lps,
-				ioim->itnim->rport, scsilun);
-		if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
-			bfa_stats(ioim->itnim, lm_lun_not_rdy);
-			return;
-		}
-
-		if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
-			bfa_stats(ioim->itnim, lm_lun_not_sup);
-			return;
-		}
-
-		if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
-			bfa_stats(ioim->itnim, lm_rpl_data_changed);
-			return;
-		}
-	}
-
 	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
 	/*
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1080bcb81cb..36f26da80f7 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
 struct bfad_tskim_s;
 
 typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
 
 struct bfa_fcpim_s {
 	struct bfa_s		*bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
 	u32			path_tov;
 	u16			q_depth;
 	u8			reqq;		/* Request queue to be used */
-	u8			lun_masking_pending;
 	struct list_head	itnim_q;	/* queue of active itnim */
 	struct list_head	ioim_resfree_q;	/* IOs waiting for f/w */
 	struct list_head	ioim_comp_q;	/* IO global comp Q */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
 	u8		reqq;		/* Request queue for I/O */
 	u8		mode;		/* IO is passthrough or not */
 	u64		start_time;	/* IO's Profile start val */
-	bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
 };
 
 struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
 	(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;	\
 } while (0)
 
-#define BFA_IOIM_TO_LPS(__ioim)		\
-	BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa),	\
-		__ioim->itnim->rport->rport_info.lp_tag)
-
 static inline bfa_boolean_t
 bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 95adb86d376..b52cbb6bcd5 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
 #define BFA_LP_TAG_INVALID	0xff
 void	bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
 void	bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t	bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t	bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
-					 wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
 
 /*
  * bfa fcxp API functions
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 66fb72531b3..404fd10ddb2 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	bfa_fcs_vport_start(&vport->fcs_vport);
+	list_add_tail(&vport->list_entry, &bfad->vport_list);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	bfad->ref_count = 0;
 	bfad->pport.bfad = bfad;
 	INIT_LIST_HEAD(&bfad->pbc_vport_list);
+	INIT_LIST_HEAD(&bfad->vport_list);
 
 	/* Setup the debugfs node for this bfad */
 	if (bfa_debugfs_enable)
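
Taken together with the bfad_attr.c and bfad_drv.h hunks below, this introduces the standard kernel list-bookkeeping triple: initialize the head once at probe time, link each created vport under the driver lock, and unlink it on delete before freeing. A standalone sketch; the minimal list implementation stands in for <linux/list.h>:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	n->next = n->prev = NULL;
}

struct vport { struct list_head list_entry; };

int main(void)
{
	struct list_head vport_list;
	struct vport v;

	INIT_LIST_HEAD(&vport_list);			/* probe */
	list_add_tail(&v.list_entry, &vport_list);	/* create */
	list_del(&v.list_entry);			/* delete, before kfree */
	printf("empty=%d\n", vport_list.next == &vport_list);
	return 0;
}
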
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9d95844ab46..1938fe0473e 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
 
 free_scsi_host:
 	bfad_scsi_host_free(bfad, im_port);
-
+	list_del(&vport->list_entry);
 	kfree(vport);
 
 	return 0;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 06fc00caeb4..530de2b1200 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2394,6 +2394,21 @@ out:
 	return 0;
 }
 
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+	struct bfad_vport_s *vport = NULL;
+
+	/* Set the scsi device LUN SCAN flags for base port */
+	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+	/* Set the scsi device LUN SCAN flags for the vports */
+	list_for_each_entry(vport, &bfad->vport_list, list_entry)
+		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
 int
 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
 {
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
-	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+		/* Set the LUN Scanning mode to be Sequential scan */
+		if (iocmd->status == BFA_STATUS_OK)
+			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
-	else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
+		if (iocmd->status == BFA_STATUS_OK)
+			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
 		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	return 0;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 5e19a5f820e..dc5b9d99c45 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
227 struct list_head active_aen_q; 228 struct list_head active_aen_q;
228 struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY]; 229 struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
229 spinlock_t bfad_aen_spinlock; 230 spinlock_t bfad_aen_spinlock;
231 struct list_head vport_list;
230}; 232};
231 233
232/* BFAD state machine events */ 234/* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index e5db649e8eb..3153923f5b6 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -918,16 +918,70 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
918} 918}
919 919
920/* 920/*
921 * Function is invoked from the SCSI Host Template slave_alloc() entry point.
922 * Has the logic to query the LUN Mask database to check if this LUN needs to
923 * be made visible to the SCSI mid-layer or not.
924 *
925 * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
 926 * Returns -ENXIO to notify the SCSI mid-layer not to add this LUN to the OS stack.
927 */
928static int
929bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
930 struct fc_rport *rport)
931{
932 struct bfad_itnim_data_s *itnim_data =
933 (struct bfad_itnim_data_s *) rport->dd_data;
934 struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
935 struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
936 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
937 int i = 0, ret = -ENXIO;
938
939 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
940 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
941 scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
942 lun_list[i].rp_tag == bfa_rport->rport_tag &&
943 lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
944 ret = BFA_STATUS_OK;
945 break;
946 }
947 }
948 return ret;
949}
950
951/*
921 * Scsi_Host template entry slave_alloc 952 * Scsi_Host template entry slave_alloc
922 */ 953 */
923static int 954static int
924bfad_im_slave_alloc(struct scsi_device *sdev) 955bfad_im_slave_alloc(struct scsi_device *sdev)
925{ 956{
926 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 957 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
958 struct bfad_itnim_data_s *itnim_data =
959 (struct bfad_itnim_data_s *) rport->dd_data;
960 struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
927 961
928 if (!rport || fc_remote_port_chkready(rport)) 962 if (!rport || fc_remote_port_chkready(rport))
929 return -ENXIO; 963 return -ENXIO;
930 964
965 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
966 /*
967 * We should not mask LUN 0 - since this will translate
 968 * to no LUN / TARGET for the SCSI ml, resulting in no scan.
969 */
970 if (sdev->lun == 0) {
971 sdev->sdev_bflags |= BLIST_NOREPORTLUN |
972 BLIST_SPARSELUN;
973 goto done;
974 }
975
976 /*
977 * Query LUN Mask configuration - to expose this LUN
978 * to the SCSI mid-layer or to mask it.
979 */
980 if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
981 BFA_STATUS_OK)
982 return -ENXIO;
983 }
984done:
931 sdev->hostdata = rport->dd_data; 985 sdev->hostdata = rport->dd_data;
932 986
933 return 0; 987 return 0;
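
The visibility check above is a linear scan of the LUN-mask table keyed on (LUN, remote-port tag, logical-port tag). A user-space sketch of the same matching logic, with hypothetical simplified types standing in for the bfa structures:

#include <stdio.h>

#define MAX_LUN_MASK_CFG 4
#define LUN_MASK_ACTIVE  1

/* Hypothetical stand-in for struct bfa_lun_mask_s. */
struct lun_mask_entry {
        int            state;
        unsigned int   lun;
        unsigned short rp_tag;  /* remote port tag */
        unsigned char  lp_tag;  /* logical port tag */
};

/* Returns 0 (visible) only when an ACTIVE entry matches all three keys;
 * the driver maps the "not found" case to -ENXIO in slave_alloc(). */
static int lun_visible(const struct lun_mask_entry *tbl,
                       unsigned int lun, unsigned short rp, unsigned char lp)
{
        int i;

        for (i = 0; i < MAX_LUN_MASK_CFG; i++)
                if (tbl[i].state == LUN_MASK_ACTIVE && tbl[i].lun == lun &&
                    tbl[i].rp_tag == rp && tbl[i].lp_tag == lp)
                        return 0;
        return -1;
}

int main(void)
{
        struct lun_mask_entry tbl[MAX_LUN_MASK_CFG] = {
                { LUN_MASK_ACTIVE, 2, 10, 1 },
        };

        printf("lun 2: %d, lun 3: %d\n",
               lun_visible(tbl, 2, 10, 1), lun_visible(tbl, 3, 10, 1));
        return 0;
}
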
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
1037 && (fc_rport->scsi_target_id < MAX_FCP_TARGET)) 1091 && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
1038 itnim->scsi_tgt_id = fc_rport->scsi_target_id; 1092 itnim->scsi_tgt_id = fc_rport->scsi_target_id;
1039 1093
1094 itnim->channel = fc_rport->channel;
1095
1040 return; 1096 return;
1041} 1097}
1042 1098
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 004b6cf848d..0814367ef10 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
91 struct fc_rport *fc_rport; 91 struct fc_rport *fc_rport;
92 struct bfa_itnim_s *bfa_itnim; 92 struct bfa_itnim_s *bfa_itnim;
93 u16 scsi_tgt_id; 93 u16 scsi_tgt_id;
94 u16 channel;
94 u16 queue_work; 95 u16 queue_work;
95 unsigned long last_ramp_up_time; 96 unsigned long last_ramp_up_time;
96 unsigned long last_queue_full_time; 97 unsigned long last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
166int bfad_im_bsg_request(struct fc_bsg_job *job); 167int bfad_im_bsg_request(struct fc_bsg_job *job);
167int bfad_im_bsg_timeout(struct fc_bsg_job *job); 168int bfad_im_bsg_timeout(struct fc_bsg_job *job);
168 169
170/*
171 * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
172 * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
173 *
 174 * Internally iterates over all the ITNIMs that are part of the im_port
 175 * and sets the sdev_bflags for the scsi_device associated with LUN #0.
176 */
177#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \
178 struct scsi_device *__sdev = NULL; \
179 struct bfad_itnim_s *__itnim = NULL; \
180 u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \
181 list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \
182 list_entry) { \
183 __sdev = scsi_device_lookup((__im_port)->shost, \
184 __itnim->channel, \
185 __itnim->scsi_tgt_id, 0); \
186 if (__sdev) { \
187 if ((__lunmask_cfg) == BFA_TRUE) \
188 __sdev->sdev_bflags |= scan_flags; \
189 else \
190 __sdev->sdev_bflags &= ~scan_flags; \
191 scsi_device_put(__sdev); \
192 } \
193 } \
194} while (0)
195
169#endif 196#endif
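
Expanded for readability, the macro above behaves like the following function-style sketch (kernel context, not compilable standalone; the driver uses a macro so the helper can live in this header):

/* Sketch: functional equivalent of bfad_reset_sdev_bflags(). */
static void reset_sdev_bflags(struct bfad_im_port_s *im_port, int lunmask_on)
{
        u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;
        struct bfad_itnim_s *itnim;
        struct scsi_device *sdev;

        list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
                /* Only LUN 0 carries the scan-mode hint for each target. */
                sdev = scsi_device_lookup(im_port->shost, itnim->channel,
                                          itnim->scsi_tgt_id, 0);
                if (!sdev)
                        continue;
                if (lunmask_on)
                        sdev->sdev_bflags |= scan_flags;  /* sequential scan */
                else
                        sdev->sdev_bflags &= ~scan_flags; /* REPORT_LUNS scan */
                scsi_device_put(sdev);
        }
}
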
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c5360ffb4be..d3ff9cd4023 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1868,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
1868 1868
1869 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); 1869 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
1870 if (!tdata->skb) { 1870 if (!tdata->skb) {
1871 pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n", 1871 struct cxgbi_sock *csk = cconn->cep->csk;
1872 cdev->skb_tx_rsvd, headroom, opcode); 1872 struct net_device *ndev = cdev->ports[csk->port_id];
1873 ndev->stats.tx_dropped++;
1873 return -ENOMEM; 1874 return -ENOMEM;
1874 } 1875 }
1875 1876
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4ef021291a4..04c5cea47a2 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
466 * Power On, Reset, or Bus Device Reset, just retry. 466 * Power On, Reset, or Bus Device Reset, just retry.
467 */ 467 */
468 return ADD_TO_MLQUEUE; 468 return ADD_TO_MLQUEUE;
469 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
470 /*
471 * Mode Parameters Changed
472 */
473 return ADD_TO_MLQUEUE;
469 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) 474 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
470 /* 475 /*
471 * ALUA state changed 476 * ALUA state changed
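
Each (ASC, ASCQ) pair handled in the growing if-chain above means "requeue and retry". The same dispatch as a standalone table-driven sketch (treating ASC 0x29 as matching any qualifier is an assumption for illustration):

#include <stdio.h>

#define ANY_ASCQ 0x100

static const struct { unsigned short asc, ascq; } ua_requeue[] = {
        { 0x29, ANY_ASCQ },     /* power on, reset, or bus device reset */
        { 0x2a, 0x01 },         /* mode parameters changed */
        { 0x2a, 0x06 },         /* ALUA state changed */
};

static int should_requeue(unsigned char asc, unsigned char ascq)
{
        size_t i;

        for (i = 0; i < sizeof(ua_requeue) / sizeof(ua_requeue[0]); i++)
                if (ua_requeue[i].asc == asc &&
                    (ua_requeue[i].ascq == ANY_ASCQ ||
                     ua_requeue[i].ascq == ascq))
                        return 1;       /* caller returns ADD_TO_MLQUEUE */
        return 0;
}

int main(void)
{
        printf("0x2a/01: %d, 0x2a/02: %d\n",
               should_requeue(0x2a, 0x01), should_requeue(0x2a, 0x02));
        return 0;
}
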
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 841ebf4a678..53a31c753cb 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -953,6 +953,8 @@ static int __init rdac_init(void)
953 if (!kmpath_rdacd) { 953 if (!kmpath_rdacd) {
954 scsi_unregister_device_handler(&rdac_dh); 954 scsi_unregister_device_handler(&rdac_dh);
955 printk(KERN_ERR "kmpath_rdacd creation failed.\n"); 955 printk(KERN_ERR "kmpath_rdacd creation failed.\n");
956
957 r = -EINVAL;
956 } 958 }
957done: 959done:
958 return r; 960 return r;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 8d67467dd9c..e9599600aa2 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
58MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ 58MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
59 "Direct Data Placement (DDP)."); 59 "Direct Data Placement (DDP).");
60 60
61DEFINE_MUTEX(fcoe_config_mutex); 61unsigned int fcoe_debug_logging;
62module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
63MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
64
65static DEFINE_MUTEX(fcoe_config_mutex);
62 66
63static struct workqueue_struct *fcoe_wq; 67static struct workqueue_struct *fcoe_wq;
64 68
@@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
67 71
68/* fcoe host list */ 72/* fcoe host list */
 69/* must only be accessed under the RTNL mutex */ 73/* must only be accessed under the RTNL mutex */
70LIST_HEAD(fcoe_hostlist); 74static LIST_HEAD(fcoe_hostlist);
71DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 75static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
72 76
73/* Function Prototypes */ 77/* Function Prototypes */
74static int fcoe_reset(struct Scsi_Host *); 78static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
157 .lport_set_port_id = fcoe_set_port_id, 161 .lport_set_port_id = fcoe_set_port_id,
158}; 162};
159 163
160struct fc_function_template fcoe_nport_fc_functions = { 164static struct fc_function_template fcoe_nport_fc_functions = {
161 .show_host_node_name = 1, 165 .show_host_node_name = 1,
162 .show_host_port_name = 1, 166 .show_host_port_name = 1,
163 .show_host_supported_classes = 1, 167 .show_host_supported_classes = 1,
@@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
197 .bsg_request = fc_lport_bsg_request, 201 .bsg_request = fc_lport_bsg_request,
198}; 202};
199 203
200struct fc_function_template fcoe_vport_fc_functions = { 204static struct fc_function_template fcoe_vport_fc_functions = {
201 .show_host_node_name = 1, 205 .show_host_node_name = 1,
202 .show_host_port_name = 1, 206 .show_host_port_name = 1,
203 .show_host_supported_classes = 1, 207 .show_host_supported_classes = 1,
@@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
433 * 437 *
434 * Caller must be holding the RTNL mutex 438 * Caller must be holding the RTNL mutex
435 */ 439 */
436void fcoe_interface_cleanup(struct fcoe_interface *fcoe) 440static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
437{ 441{
438 struct net_device *netdev = fcoe->netdev; 442 struct net_device *netdev = fcoe->netdev;
439 struct fcoe_ctlr *fip = &fcoe->ctlr; 443 struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
748 * 752 *
749 * Returns: True for read types I/O, otherwise returns false. 753 * Returns: True for read types I/O, otherwise returns false.
750 */ 754 */
751bool fcoe_oem_match(struct fc_frame *fp) 755static bool fcoe_oem_match(struct fc_frame *fp)
752{ 756{
753 struct fc_frame_header *fh = fc_frame_header_get(fp); 757 struct fc_frame_header *fh = fc_frame_header_get(fp);
754 struct fcp_cmnd *fcp; 758 struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
756 if (fc_fcp_is_read(fr_fsp(fp)) && 760 if (fc_fcp_is_read(fr_fsp(fp)) &&
757 (fr_fsp(fp)->data_len > fcoe_ddp_min)) 761 (fr_fsp(fp)->data_len > fcoe_ddp_min))
758 return true; 762 return true;
759 else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) { 763 else if ((fr_fsp(fp) == NULL) &&
764 (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
765 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
760 fcp = fc_frame_payload_get(fp, sizeof(*fcp)); 766 fcp = fc_frame_payload_get(fp, sizeof(*fcp));
 761 if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN && 767 if (fcp && (fcp->fc_flags & FCP_CFL_WRDATA) &&
762 fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) && 768 (ntohl(fcp->fc_dl) > fcoe_ddp_min))
763 (fcp->fc_flags & FCP_CFL_WRDATA))
764 return true; 769 return true;
765 } 770 }
766 return false; 771 return false;
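
With the frame fields flattened into plain parameters, the rewritten predicate reduces to the following sketch (hypothetical wrapper; the driver works on struct fc_frame directly): offload DDP for large reads, and for large unsolicited write commands whose exchange has no responder ID assigned yet.

/* Sketch of the fcoe_oem_match() decision, frame state as plain flags. */
static int ddp_offload_wanted(int is_read_fsp, unsigned int data_len,
                              int has_fsp, int unsol_cmd, int xid_unknown,
                              int wrdata, unsigned int fcp_dl,
                              unsigned int ddp_min)
{
        if (is_read_fsp && data_len > ddp_min)
                return 1;       /* large read I/O */
        if (!has_fsp && unsol_cmd && xid_unknown && wrdata &&
            fcp_dl > ddp_min)
                return 1;       /* large unsolicited write command */
        return 0;
}
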
@@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void)
1106 * 1111 *
1107 * Returns: 0 on success 1112 * Returns: 0 on success
1108 */ 1113 */
1109int __exit fcoe_if_exit(void) 1114static int __exit fcoe_if_exit(void)
1110{ 1115{
1111 fc_release_transport(fcoe_nport_scsi_transport); 1116 fc_release_transport(fcoe_nport_scsi_transport);
1112 fc_release_transport(fcoe_vport_scsi_transport); 1117 fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
1295 * 1300 *
1296 * Returns: 0 for success 1301 * Returns: 0 for success
1297 */ 1302 */
1298int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, 1303static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1299 struct packet_type *ptype, struct net_device *olddev) 1304 struct packet_type *ptype, struct net_device *olddev)
1300{ 1305{
1301 struct fc_lport *lport; 1306 struct fc_lport *lport;
@@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
1451 * 1456 *
1452 * Return: 0 for success 1457 * Return: 0 for success
1453 */ 1458 */
1454int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) 1459static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1455{ 1460{
1456 int wlen; 1461 int wlen;
1457 u32 crc; 1462 u32 crc;
@@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
1671 skb->dev ? skb->dev->name : "<NULL>"); 1676 skb->dev ? skb->dev->name : "<NULL>");
1672 1677
1673 port = lport_priv(lport); 1678 port = lport_priv(lport);
 1674 if (skb_is_nonlinear(skb)) 1679 skb_linearize(skb); /* the skb_is_nonlinear() check is done inside skb_linearize() */
1675 skb_linearize(skb); /* not ideal */
1676 1680
1677 /* 1681 /*
1678 * Frame length checks and setting up the header pointers 1682 * Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@ drop:
1728 * 1732 *
1729 * Return: 0 for success 1733 * Return: 0 for success
1730 */ 1734 */
1731int fcoe_percpu_receive_thread(void *arg) 1735static int fcoe_percpu_receive_thread(void *arg)
1732{ 1736{
1733 struct fcoe_percpu_s *p = arg; 1737 struct fcoe_percpu_s *p = arg;
1734 struct sk_buff *skb; 1738 struct sk_buff *skb;
@@ -2146,7 +2150,7 @@ out_nortnl:
2146 * Returns: 0 if the ethtool query was successful 2150 * Returns: 0 if the ethtool query was successful
2147 * -1 if the ethtool query failed 2151 * -1 if the ethtool query failed
2148 */ 2152 */
2149int fcoe_link_speed_update(struct fc_lport *lport) 2153static int fcoe_link_speed_update(struct fc_lport *lport)
2150{ 2154{
2151 struct net_device *netdev = fcoe_netdev(lport); 2155 struct net_device *netdev = fcoe_netdev(lport);
2152 struct ethtool_cmd ecmd; 2156 struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
2180 * Returns: 0 if link is UP and OK, -1 if not 2184 * Returns: 0 if link is UP and OK, -1 if not
2181 * 2185 *
2182 */ 2186 */
2183int fcoe_link_ok(struct fc_lport *lport) 2187static int fcoe_link_ok(struct fc_lport *lport)
2184{ 2188{
2185 struct net_device *netdev = fcoe_netdev(lport); 2189 struct net_device *netdev = fcoe_netdev(lport);
2186 2190
@@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
 2200 * there are no packets that will be handled by the lport, but also that any 2204 * there are no packets that will be handled by the lport, but also that any
 2201 * threads already handling packets have returned. 2205 * threads already handling packets have returned.
2202 */ 2206 */
2203void fcoe_percpu_clean(struct fc_lport *lport) 2207static void fcoe_percpu_clean(struct fc_lport *lport)
2204{ 2208{
2205 struct fcoe_percpu_s *pp; 2209 struct fcoe_percpu_s *pp;
2206 struct fcoe_rcv_info *fr; 2210 struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
2251 * 2255 *
2252 * Returns: Always 0 (return value required by FC transport template) 2256 * Returns: Always 0 (return value required by FC transport template)
2253 */ 2257 */
2254int fcoe_reset(struct Scsi_Host *shost) 2258static int fcoe_reset(struct Scsi_Host *shost)
2255{ 2259{
2256 struct fc_lport *lport = shost_priv(shost); 2260 struct fc_lport *lport = shost_priv(shost);
2257 struct fcoe_port *port = lport_priv(lport); 2261 struct fcoe_port *port = lport_priv(lport);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 6c6884bcf84..bcc89e63949 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,9 +40,7 @@
40#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ 40#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
41#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ 41#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
42 42
43unsigned int fcoe_debug_logging; 43extern unsigned int fcoe_debug_logging;
44module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
45MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
46 44
47#define FCOE_LOGGING 0x01 /* General logging, not categorized */ 45#define FCOE_LOGGING 0x01 /* General logging, not categorized */
48#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ 46#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5140f5d0fd6..b96962c3944 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4271,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
4271 remove_ctlr_from_lockup_detector_list(h); 4271 remove_ctlr_from_lockup_detector_list(h);
4272 /* If the list of ctlr's to monitor is empty, stop the thread */ 4272 /* If the list of ctlr's to monitor is empty, stop the thread */
4273 if (list_empty(&hpsa_ctlr_list)) { 4273 if (list_empty(&hpsa_ctlr_list)) {
4274 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4274 kthread_stop(hpsa_lockup_detector); 4275 kthread_stop(hpsa_lockup_detector);
4276 spin_lock_irqsave(&lockup_detector_lock, flags);
4275 hpsa_lockup_detector = NULL; 4277 hpsa_lockup_detector = NULL;
4276 } 4278 }
4277 spin_unlock_irqrestore(&lockup_detector_lock, flags); 4279 spin_unlock_irqrestore(&lockup_detector_lock, flags);
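
The reordering above matters because kthread_stop() sleeps until the thread exits, and sleeping while holding a spinlock is not allowed; worse, the lockup-detector thread may itself need lockup_detector_lock to finish, so holding the lock across the call could deadlock. The general shape of the pattern, as a kernel-context sketch:

/* Sketch: drop a spinlock around a sleeping call, then retake it. */
spin_lock_irqsave(&lock, flags);
if (list_empty(&monitored)) {
        spin_unlock_irqrestore(&lock, flags);
        kthread_stop(worker);                   /* may sleep */
        spin_lock_irqsave(&lock, flags);
        worker = NULL;
}
spin_unlock_irqrestore(&lock, flags);
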
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644
index 5f54461cabc..00000000000
--- a/drivers/scsi/isci/firmware/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
1# Makefile for create_fw
2#
3CC=gcc
4CFLAGS=-c -Wall -O2 -g
5LDFLAGS=
6SOURCES=create_fw.c
7OBJECTS=$(SOURCES:.cpp=.o)
8EXECUTABLE=create_fw
9
10all: $(SOURCES) $(EXECUTABLE)
11
12$(EXECUTABLE): $(OBJECTS)
13 $(CC) $(LDFLAGS) $(OBJECTS) -o $@
14
15.c.o:
16 $(CC) $(CFLAGS) $< -O $@
17
18clean:
19 rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644
index 8056d2bd233..00000000000
--- a/drivers/scsi/isci/firmware/README
+++ /dev/null
@@ -1,36 +0,0 @@
1This defines the temporary binary blob we are to pass to the SCU
2driver to emulate the binary firmware that we will eventually be
3able to access via NVRAM on the SCU controller.
4
5The current size of the binary blob is expected to be 149 bytes or larger
6
7Header Types:
80x1: Phy Masks
90x2: Phy Gens
100x3: SAS Addrs
110xff: End of Data
12
13ID string - u8[12]: "#SCU MAGIC#\0"
14Version - u8: 1
15SubVersion - u8: 0
16
17Header Type - u8: 0x1
18Size - u8: 8
19Phy Mask - u32[8]
20
21Header Type - u8: 0x2
22Size - u8: 8
23Phy Gen - u32[8]
24
25Header Type - u8: 0x3
26Size - u8: 8
27Sas Addr - u64[8]
28
29Header Type - u8: 0xf
30
31
32==============================================================================
33
34Place isci_firmware.bin in /lib/firmware
35Be sure to recreate the initramfs image to include the firmware.
36
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644
index c7a2887a7e9..00000000000
--- a/drivers/scsi/isci/firmware/create_fw.c
+++ /dev/null
@@ -1,99 +0,0 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <sys/types.h>
5#include <sys/stat.h>
6#include <fcntl.h>
7#include <string.h>
8#include <errno.h>
9#include <asm/types.h>
10#include <strings.h>
11#include <stdint.h>
12
13#include "create_fw.h"
14#include "../probe_roms.h"
15
16int write_blob(struct isci_orom *isci_orom)
17{
18 FILE *fd;
19 int err;
20 size_t count;
21
22 fd = fopen(blob_name, "w+");
23 if (!fd) {
24 perror("Open file for write failed");
25 fclose(fd);
26 return -EIO;
27 }
28
29 count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
30 if (count != 1) {
31 perror("Write data failed");
32 fclose(fd);
33 return -EIO;
34 }
35
36 fclose(fd);
37
38 return 0;
39}
40
41void set_binary_values(struct isci_orom *isci_orom)
42{
43 int ctrl_idx, phy_idx, port_idx;
44
45 /* setting OROM signature */
46 strncpy(isci_orom->hdr.signature, sig, strlen(sig));
47 isci_orom->hdr.version = version;
48 isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
49 isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
50 isci_orom->hdr.num_elements = num_elements;
51
52 for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
53 isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
54 isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
55 max_num_concurrent_dev_spin_up;
56 isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
57 enable_ssc;
58
59 for (port_idx = 0; port_idx < 4; port_idx++)
60 isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
61 phy_mask[ctrl_idx][port_idx];
62
63 for (phy_idx = 0; phy_idx < 4; phy_idx++) {
64 isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
65 (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
66 isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
67 (__u32)(sas_addr[ctrl_idx][phy_idx]);
68
69 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
70 afe_tx_amp_control0;
71 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
72 afe_tx_amp_control1;
73 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
74 afe_tx_amp_control2;
75 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
76 afe_tx_amp_control3;
77 }
78 }
79}
80
81int main(void)
82{
83 int err;
84 struct isci_orom *isci_orom;
85
86 isci_orom = malloc(sizeof(struct isci_orom));
87 memset(isci_orom, 0, sizeof(struct isci_orom));
88
89 set_binary_values(isci_orom);
90
91 err = write_blob(isci_orom);
92 if (err < 0) {
93 free(isci_orom);
94 return err;
95 }
96
97 free(isci_orom);
98 return 0;
99}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644
index 5f298828d22..00000000000
--- a/drivers/scsi/isci/firmware/create_fw.h
+++ /dev/null
@@ -1,77 +0,0 @@
1#ifndef _CREATE_FW_H_
2#define _CREATE_FW_H_
3#include "../probe_roms.h"
4
5
6/* we are configuring for 2 SCUs */
7static const int num_elements = 2;
8
9/*
10 * For all defined arrays:
11 * elements 0-3 are for SCU0, ports 0-3
12 * elements 4-7 are for SCU1, ports 0-3
13 *
14 * valid configurations for one SCU are:
15 * P0 P1 P2 P3
16 * ----------------
17 * 0xF,0x0,0x0,0x0 # 1 x4 port
18 * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
19 * # ports
20 * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
21 * # port
22 * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
23 * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
24 *
25 * if there is a port/phy on which you do not wish to override the default
26 * values, use the value assigned to UNINIT_PARAM (255).
27 */
28
29/* discovery mode type (port auto config mode by default ) */
30
31/*
32 * if there is a port/phy on which you do not wish to override the default
33 * values, use the value "0000000000000000". SAS address of zero's is
34 * considered invalid and will not be used.
35 */
36#ifdef MPC
37static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
38static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
39 {1, 2, 4, 8} };
40static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
41 0x5FCFFFFFF0000002ULL,
42 0x5FCFFFFFF0000003ULL,
43 0x5FCFFFFFF0000004ULL },
44 { 0x5FCFFFFFF0000005ULL,
45 0x5FCFFFFFF0000006ULL,
46 0x5FCFFFFFF0000007ULL,
47 0x5FCFFFFFF0000008ULL } };
48#else /* APC (default) */
49static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
50static const __u8 phy_mask[2][4];
51static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
52 0x5FCFFFFF00000001ULL,
53 0x5FCFFFFF00000001ULL,
54 0x5FCFFFFF00000001ULL },
55 { 0x5FCFFFFF00000002ULL,
56 0x5FCFFFFF00000002ULL,
57 0x5FCFFFFF00000002ULL,
58 0x5FCFFFFF00000002ULL } };
59#endif
60
61/* Maximum number of concurrent device spin up */
62static const int max_num_concurrent_dev_spin_up = 1;
63
64/* enable of ssc operation */
65static const int enable_ssc;
66
67/* AFE_TX_AMP_CONTROL */
68static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
69static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
70static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
71static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
72
73static const char blob_name[] = "isci_firmware.bin";
74static const char sig[] = "ISCUOEMB";
75static const unsigned char version = 0x10;
76
77#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index e7fe9c4c85b..1a65d651423 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
899 */ 899 */
900 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || 900 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
901 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || 901 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
902 (iphy->is_in_link_training == true && is_phy_starting(iphy))) { 902 (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
903 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
903 is_controller_start_complete = false; 904 is_controller_start_complete = false;
904 break; 905 break;
905 } 906 }
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1666 /* Default to no SSC operation. */ 1667 /* Default to no SSC operation. */
1667 ihost->oem_parameters.controller.do_enable_ssc = false; 1668 ihost->oem_parameters.controller.do_enable_ssc = false;
1668 1669
1670 /* Default to short cables on all phys. */
1671 ihost->oem_parameters.controller.cable_selection_mask = 0;
1672
1669 /* Initialize all of the port parameter information to narrow ports. */ 1673 /* Initialize all of the port parameter information to narrow ports. */
1670 for (index = 0; index < SCI_MAX_PORTS; index++) { 1674 for (index = 0; index < SCI_MAX_PORTS; index++) {
1671 ihost->oem_parameters.ports[index].phy_mask = 0; 1675 ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1673 1677
1674 /* Initialize all of the phy parameter information. */ 1678 /* Initialize all of the phy parameter information. */
1675 for (index = 0; index < SCI_MAX_PHYS; index++) { 1679 for (index = 0; index < SCI_MAX_PHYS; index++) {
1676 /* Default to 6G (i.e. Gen 3) for now. */ 1680 /* Default to 3G (i.e. Gen 2). */
1677 ihost->user_parameters.phys[index].max_speed_generation = 3; 1681 ihost->user_parameters.phys[index].max_speed_generation =
1682 SCIC_SDS_PARM_GEN2_SPEED;
1678 1683
1679 /* the frequencies cannot be 0 */ 1684 /* the frequencies cannot be 0 */
1680 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; 1685 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1694 ihost->user_parameters.ssp_inactivity_timeout = 5; 1699 ihost->user_parameters.ssp_inactivity_timeout = 5;
1695 ihost->user_parameters.stp_max_occupancy_timeout = 5; 1700 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1696 ihost->user_parameters.ssp_max_occupancy_timeout = 20; 1701 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1697 ihost->user_parameters.no_outbound_task_timeout = 20; 1702 ihost->user_parameters.no_outbound_task_timeout = 2;
1698} 1703}
1699 1704
1700static void controller_timeout(unsigned long data) 1705static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
1759 return sci_controller_reset(ihost); 1764 return sci_controller_reset(ihost);
1760} 1765}
1761 1766
1762int sci_oem_parameters_validate(struct sci_oem_params *oem) 1767int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1763{ 1768{
1764 int i; 1769 int i;
1765 1770
@@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
1791 oem->controller.max_concurr_spin_up < 1) 1796 oem->controller.max_concurr_spin_up < 1)
1792 return -EINVAL; 1797 return -EINVAL;
1793 1798
1799 if (oem->controller.do_enable_ssc) {
1800 if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1801 return -EINVAL;
1802
1803 if (version >= ISCI_ROM_VER_1_1) {
1804 u8 test = oem->controller.ssc_sata_tx_spread_level;
1805
1806 switch (test) {
1807 case 0:
1808 case 2:
1809 case 3:
1810 case 6:
1811 case 7:
1812 break;
1813 default:
1814 return -EINVAL;
1815 }
1816
1817 test = oem->controller.ssc_sas_tx_spread_level;
1818 if (oem->controller.ssc_sas_tx_type == 0) {
1819 switch (test) {
1820 case 0:
1821 case 2:
1822 case 3:
1823 break;
1824 default:
1825 return -EINVAL;
1826 }
1827 } else if (oem->controller.ssc_sas_tx_type == 1) {
1828 switch (test) {
1829 case 0:
1830 case 3:
1831 case 6:
1832 break;
1833 default:
1834 return -EINVAL;
1835 }
1836 }
1837 }
1838 }
1839
1794 return 0; 1840 return 0;
1795} 1841}
1796 1842
1797static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) 1843static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1798{ 1844{
1799 u32 state = ihost->sm.current_state_id; 1845 u32 state = ihost->sm.current_state_id;
1846 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
1800 1847
1801 if (state == SCIC_RESET || 1848 if (state == SCIC_RESET ||
1802 state == SCIC_INITIALIZING || 1849 state == SCIC_INITIALIZING ||
1803 state == SCIC_INITIALIZED) { 1850 state == SCIC_INITIALIZED) {
1804 1851
1805 if (sci_oem_parameters_validate(&ihost->oem_parameters)) 1852 if (sci_oem_parameters_validate(&ihost->oem_parameters,
1853 pci_info->orom->hdr.version))
1806 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 1854 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1807 1855
1808 return SCI_SUCCESS; 1856 return SCI_SUCCESS;
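
The nested switches above are set-membership tests: valid SATA spread levels are {0, 2, 3, 6, 7}, valid SAS levels are {0, 2, 3} for tx type 0 and {0, 3, 6} for tx type 1. Equivalently, as a standalone sketch, each valid set fits in one bitmask:

#include <stdio.h>

/* {0,2,3,6,7} -> 0xCD, {0,2,3} -> 0x0D, {0,3,6} -> 0x49 */
#define SATA_SPREAD_OK 0xCD
#define SAS0_SPREAD_OK 0x0D
#define SAS1_SPREAD_OK 0x49

static int spread_level_ok(unsigned int level, unsigned int valid_mask)
{
        return level < 8 && ((valid_mask >> level) & 1);
}

int main(void)
{
        printf("sata 7: %d, sas0 7: %d, sas1 6: %d\n",
               spread_level_ok(7, SATA_SPREAD_OK),
               spread_level_ok(7, SAS0_SPREAD_OK),
               spread_level_ok(6, SAS1_SPREAD_OK));
        return 0;
}
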
@@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data)
1857 ihost->power_control.phys_waiting--; 1905 ihost->power_control.phys_waiting--;
1858 ihost->power_control.phys_granted_power++; 1906 ihost->power_control.phys_granted_power++;
1859 sci_phy_consume_power_handler(iphy); 1907 sci_phy_consume_power_handler(iphy);
1908
1909 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
1910 u8 j;
1911
1912 for (j = 0; j < SCI_MAX_PHYS; j++) {
1913 struct isci_phy *requester = ihost->power_control.requesters[j];
1914
1915 /*
1916 * Search the power_control queue to see if there are other phys
1917 * attached to the same remote device. If found, take all of
1918 * them out of await_sas_power state.
1919 */
1920 if (requester != NULL && requester != iphy) {
1921 u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1922 iphy->frame_rcvd.iaf.sas_addr,
1923 sizeof(requester->frame_rcvd.iaf.sas_addr));
1924
1925 if (other == 0) {
1926 ihost->power_control.requesters[j] = NULL;
1927 ihost->power_control.phys_waiting--;
1928 sci_phy_consume_power_handler(requester);
1929 }
1930 }
1931 }
1932 }
1860 } 1933 }
1861 1934
1862 /* 1935 /*
@@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1891 ihost->power_control.timer_started = true; 1964 ihost->power_control.timer_started = true;
1892 1965
1893 } else { 1966 } else {
1894 /* Add the phy in the waiting list */ 1967 /*
 1895 ihost->power_control.requesters[iphy->phy_index] = iphy; 1968 * If other phys attached to the same SAS address as this phy are
 1896 ihost->power_control.phys_waiting++; 1969 * already in the READY state, this phy does not need to wait.
1970 */
1971 u8 i;
1972 struct isci_phy *current_phy;
1973
1974 for (i = 0; i < SCI_MAX_PHYS; i++) {
1975 u8 other;
1976 current_phy = &ihost->phys[i];
1977
1978 other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1979 iphy->frame_rcvd.iaf.sas_addr,
1980 sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1981
1982 if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1983 current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
1984 other == 0) {
1985 sci_phy_consume_power_handler(iphy);
1986 break;
1987 }
1988 }
1989
1990 if (i == SCI_MAX_PHYS) {
1991 /* Add the phy in the waiting list */
1992 ihost->power_control.requesters[iphy->phy_index] = iphy;
1993 ihost->power_control.phys_waiting++;
1994 }
1897 } 1995 }
1898} 1996}
1899 1997
@@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1908 ihost->power_control.requesters[iphy->phy_index] = NULL; 2006 ihost->power_control.requesters[iphy->phy_index] = NULL;
1909} 2007}
1910 2008
2009static int is_long_cable(int phy, unsigned char selection_byte)
2010{
2011 return !!(selection_byte & (1 << phy));
2012}
2013
2014static int is_medium_cable(int phy, unsigned char selection_byte)
2015{
2016 return !!(selection_byte & (1 << (phy + 4)));
2017}
2018
2019static enum cable_selections decode_selection_byte(
2020 int phy,
2021 unsigned char selection_byte)
2022{
2023 return ((selection_byte & (1 << phy)) ? 1 : 0)
2024 + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
2025}
2026
2027static unsigned char *to_cable_select(struct isci_host *ihost)
2028{
2029 if (is_cable_select_overridden())
2030 return ((unsigned char *)&cable_selection_override)
2031 + ihost->id;
2032 else
2033 return &ihost->oem_parameters.controller.cable_selection_mask;
2034}
2035
2036enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
2037{
2038 return decode_selection_byte(phy, *to_cable_select(ihost));
2039}
2040
2041char *lookup_cable_names(enum cable_selections selection)
2042{
2043 static char *cable_names[] = {
2044 [short_cable] = "short",
2045 [long_cable] = "long",
2046 [medium_cable] = "medium",
2047 [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
2048 };
2049 return (selection <= undefined_cable) ? cable_names[selection]
2050 : cable_names[undefined_cable];
2051}
2052
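
The selection byte packs two bits per phy: bit phy marks a long cable and bit phy + 4 a medium one, so decoding yields 0 = short, 1 = long, 2 = medium, 3 = undefined. A standalone demo of that encoding (user space, four phys):

#include <stdio.h>

/* Same arithmetic as decode_selection_byte() above. */
static int decode(int phy, unsigned char sel)
{
        return ((sel >> phy) & 1) + (((sel >> (phy + 4)) & 1) << 1);
}

int main(void)
{
        unsigned char sel = 0x21;       /* phy0 long, phy1 medium */
        int phy;

        for (phy = 0; phy < 4; phy++)
                printf("phy%d: %d\n", phy, decode(phy, sel));
        return 0;
}
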
1911#define AFE_REGISTER_WRITE_DELAY 10 2053#define AFE_REGISTER_WRITE_DELAY 10
1912 2054
1913/* Initialize the AFE for this phy index. We need to read the AFE setup from
1914 * the OEM parameters
1915 */
1916static void sci_controller_afe_initialization(struct isci_host *ihost) 2055static void sci_controller_afe_initialization(struct isci_host *ihost)
1917{ 2056{
2057 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1918 const struct sci_oem_params *oem = &ihost->oem_parameters; 2058 const struct sci_oem_params *oem = &ihost->oem_parameters;
1919 struct pci_dev *pdev = ihost->pdev; 2059 struct pci_dev *pdev = ihost->pdev;
1920 u32 afe_status; 2060 u32 afe_status;
1921 u32 phy_id; 2061 u32 phy_id;
2062 unsigned char cable_selection_mask = *to_cable_select(ihost);
1922 2063
1923 /* Clear DFX Status registers */ 2064 /* Clear DFX Status registers */
1924 writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); 2065 writel(0x0081000f, &afe->afe_dfx_master_control0);
1925 udelay(AFE_REGISTER_WRITE_DELAY); 2066 udelay(AFE_REGISTER_WRITE_DELAY);
1926 2067
1927 if (is_b0(pdev)) { 2068 if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1928 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement 2069 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1929 * Timer, PM Stagger Timer */ 2070 * Timer, PM Stagger Timer
1930 writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); 2071 */
2072 writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1931 udelay(AFE_REGISTER_WRITE_DELAY); 2073 udelay(AFE_REGISTER_WRITE_DELAY);
1932 } 2074 }
1933 2075
1934 /* Configure bias currents to normal */ 2076 /* Configure bias currents to normal */
1935 if (is_a2(pdev)) 2077 if (is_a2(pdev))
1936 writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); 2078 writel(0x00005A00, &afe->afe_bias_control);
1937 else if (is_b0(pdev) || is_c0(pdev)) 2079 else if (is_b0(pdev) || is_c0(pdev))
1938 writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); 2080 writel(0x00005F00, &afe->afe_bias_control);
2081 else if (is_c1(pdev))
2082 writel(0x00005500, &afe->afe_bias_control);
1939 2083
1940 udelay(AFE_REGISTER_WRITE_DELAY); 2084 udelay(AFE_REGISTER_WRITE_DELAY);
1941 2085
1942 /* Enable PLL */ 2086 /* Enable PLL */
1943 if (is_b0(pdev) || is_c0(pdev)) 2087 if (is_a2(pdev))
1944 writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); 2088 writel(0x80040908, &afe->afe_pll_control0);
1945 else 2089 else if (is_b0(pdev) || is_c0(pdev))
1946 writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); 2090 writel(0x80040A08, &afe->afe_pll_control0);
2091 else if (is_c1(pdev)) {
2092 writel(0x80000B08, &afe->afe_pll_control0);
2093 udelay(AFE_REGISTER_WRITE_DELAY);
2094 writel(0x00000B08, &afe->afe_pll_control0);
2095 udelay(AFE_REGISTER_WRITE_DELAY);
2096 writel(0x80000B08, &afe->afe_pll_control0);
2097 }
1947 2098
1948 udelay(AFE_REGISTER_WRITE_DELAY); 2099 udelay(AFE_REGISTER_WRITE_DELAY);
1949 2100
1950 /* Wait for the PLL to lock */ 2101 /* Wait for the PLL to lock */
1951 do { 2102 do {
1952 afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); 2103 afe_status = readl(&afe->afe_common_block_status);
1953 udelay(AFE_REGISTER_WRITE_DELAY); 2104 udelay(AFE_REGISTER_WRITE_DELAY);
1954 } while ((afe_status & 0x00001000) == 0); 2105 } while ((afe_status & 0x00001000) == 0);
1955 2106
1956 if (is_a2(pdev)) { 2107 if (is_a2(pdev)) {
1957 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ 2108 /* Shorten SAS SNW lock time (RxLock timer value from 76
1958 writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); 2109 * us to 50 us)
2110 */
2111 writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
1959 udelay(AFE_REGISTER_WRITE_DELAY); 2112 udelay(AFE_REGISTER_WRITE_DELAY);
1960 } 2113 }
1961 2114
1962 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { 2115 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
2116 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
1963 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; 2117 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
2118 int cable_length_long =
2119 is_long_cable(phy_id, cable_selection_mask);
2120 int cable_length_medium =
2121 is_medium_cable(phy_id, cable_selection_mask);
1964 2122
1965 if (is_b0(pdev)) { 2123 if (is_a2(pdev)) {
1966 /* Configure transmitter SSC parameters */ 2124 /* All defaults, except the Receive Word
1967 writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 2125 * Alignament/Comma Detect Enable....(0xe800)
2126 */
2127 writel(0x00004512, &xcvr->afe_xcvr_control0);
2128 udelay(AFE_REGISTER_WRITE_DELAY);
2129
2130 writel(0x0050100F, &xcvr->afe_xcvr_control1);
2131 udelay(AFE_REGISTER_WRITE_DELAY);
2132 } else if (is_b0(pdev)) {
2133 /* Configure transmitter SSC parameters */
2134 writel(0x00030000, &xcvr->afe_tx_ssc_control);
1968 udelay(AFE_REGISTER_WRITE_DELAY); 2135 udelay(AFE_REGISTER_WRITE_DELAY);
1969 } else if (is_c0(pdev)) { 2136 } else if (is_c0(pdev)) {
1970 /* Configure transmitter SSC parameters */ 2137 /* Configure transmitter SSC parameters */
1971 writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 2138 writel(0x00010202, &xcvr->afe_tx_ssc_control);
1972 udelay(AFE_REGISTER_WRITE_DELAY); 2139 udelay(AFE_REGISTER_WRITE_DELAY);
1973 2140
1974 /* 2141 /* All defaults, except the Receive Word
 1975 * All defaults, except the Receive Word Alignment/Comma Detect 2142 * Alignment/Comma Detect Enable....(0xe800)
1976 * Enable....(0xe800) */ 2143 */
1977 writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 2144 writel(0x00014500, &xcvr->afe_xcvr_control0);
1978 udelay(AFE_REGISTER_WRITE_DELAY); 2145 udelay(AFE_REGISTER_WRITE_DELAY);
1979 } else { 2146 } else if (is_c1(pdev)) {
1980 /* 2147 /* Configure transmitter SSC parameters */
 1981 * All defaults, except the Receive Word Alignment/Comma Detect 2148 writel(0x00010202, &xcvr->afe_tx_ssc_control);
1982 * Enable....(0xe800) */
1983 writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1984 udelay(AFE_REGISTER_WRITE_DELAY); 2149 udelay(AFE_REGISTER_WRITE_DELAY);
1985 2150
1986 writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); 2151 /* All defaults, except the Receive Word
 2152 * Alignment/Comma Detect Enable....(0xe800)
2153 */
2154 writel(0x0001C500, &xcvr->afe_xcvr_control0);
1987 udelay(AFE_REGISTER_WRITE_DELAY); 2155 udelay(AFE_REGISTER_WRITE_DELAY);
1988 } 2156 }
1989 2157
1990 /* 2158 /* Power up TX and RX out from power down (PWRDNTX and
1991 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2159 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
1992 * & increase TX int & ext bias 20%....(0xe85c) */ 2160 */
1993 if (is_a2(pdev)) 2161 if (is_a2(pdev))
1994 writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 2162 writel(0x000003F0, &xcvr->afe_channel_control);
1995 else if (is_b0(pdev)) { 2163 else if (is_b0(pdev)) {
1996 /* Power down TX and RX (PWRDNTX and PWRDNRX) */ 2164 writel(0x000003D7, &xcvr->afe_channel_control);
1997 writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1998 udelay(AFE_REGISTER_WRITE_DELAY); 2165 udelay(AFE_REGISTER_WRITE_DELAY);
1999 2166
2000 /* 2167 writel(0x000003D4, &xcvr->afe_channel_control);
2001 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2168 } else if (is_c0(pdev)) {
2002 * & increase TX int & ext bias 20%....(0xe85c) */ 2169 writel(0x000001E7, &xcvr->afe_channel_control);
2003 writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2004 } else {
2005 writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2006 udelay(AFE_REGISTER_WRITE_DELAY); 2170 udelay(AFE_REGISTER_WRITE_DELAY);
2007 2171
2008 /* 2172 writel(0x000001E4, &xcvr->afe_channel_control);
2009 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2173 } else if (is_c1(pdev)) {
2010 * & increase TX int & ext bias 20%....(0xe85c) */ 2174 writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2011 writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 2175 &xcvr->afe_channel_control);
2176 udelay(AFE_REGISTER_WRITE_DELAY);
2177
2178 writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2179 &xcvr->afe_channel_control);
2012 } 2180 }
2013 udelay(AFE_REGISTER_WRITE_DELAY); 2181 udelay(AFE_REGISTER_WRITE_DELAY);
2014 2182
2015 if (is_a2(pdev)) { 2183 if (is_a2(pdev)) {
2016 /* Enable TX equalization (0xe824) */ 2184 /* Enable TX equalization (0xe824) */
2017 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2185 writel(0x00040000, &xcvr->afe_tx_control);
2018 udelay(AFE_REGISTER_WRITE_DELAY); 2186 udelay(AFE_REGISTER_WRITE_DELAY);
2019 } 2187 }
2020 2188
2021 /* 2189 if (is_a2(pdev) || is_b0(pdev))
2022 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), 2190 /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
2023 * RDD=0x0(RX Detect Enabled) ....(0xe800) */ 2191 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
2024 writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 2192 * Enabled) ....(0xe800)
2193 */
2194 writel(0x00004100, &xcvr->afe_xcvr_control0);
2195 else if (is_c0(pdev))
2196 writel(0x00014100, &xcvr->afe_xcvr_control0);
2197 else if (is_c1(pdev))
2198 writel(0x0001C100, &xcvr->afe_xcvr_control0);
2025 udelay(AFE_REGISTER_WRITE_DELAY); 2199 udelay(AFE_REGISTER_WRITE_DELAY);
2026 2200
2027 /* Leave DFE/FFE on */ 2201 /* Leave DFE/FFE on */
2028 if (is_a2(pdev)) 2202 if (is_a2(pdev))
2029 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2203 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2030 else if (is_b0(pdev)) { 2204 else if (is_b0(pdev)) {
2031 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2205 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2032 udelay(AFE_REGISTER_WRITE_DELAY); 2206 udelay(AFE_REGISTER_WRITE_DELAY);
2033 /* Enable TX equalization (0xe824) */ 2207 /* Enable TX equalization (0xe824) */
2034 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2208 writel(0x00040000, &xcvr->afe_tx_control);
2035 } else { 2209 } else if (is_c0(pdev)) {
2036 writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); 2210 writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2211 udelay(AFE_REGISTER_WRITE_DELAY);
2212
2213 writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2214 udelay(AFE_REGISTER_WRITE_DELAY);
2215
2216 /* Enable TX equalization (0xe824) */
2217 writel(0x00040000, &xcvr->afe_tx_control);
2218 } else if (is_c1(pdev)) {
2219 writel(cable_length_long ? 0x01500C0C :
2220 cable_length_medium ? 0x01400C0D : 0x02400C0D,
2221 &xcvr->afe_xcvr_control1);
2222 udelay(AFE_REGISTER_WRITE_DELAY);
2223
2224 writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2037 udelay(AFE_REGISTER_WRITE_DELAY); 2225 udelay(AFE_REGISTER_WRITE_DELAY);
2038 2226
2039 writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2227 writel(cable_length_long ? 0x33091C1F :
2228 cable_length_medium ? 0x3315181F : 0x2B17161F,
2229 &xcvr->afe_rx_ssc_control0);
2040 udelay(AFE_REGISTER_WRITE_DELAY); 2230 udelay(AFE_REGISTER_WRITE_DELAY);
2041 2231
2042 /* Enable TX equalization (0xe824) */ 2232 /* Enable TX equalization (0xe824) */
2043 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2233 writel(0x00040000, &xcvr->afe_tx_control);
2044 } 2234 }
2045 2235
2046 udelay(AFE_REGISTER_WRITE_DELAY); 2236 udelay(AFE_REGISTER_WRITE_DELAY);
2047 2237
2048 writel(oem_phy->afe_tx_amp_control0, 2238 writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2049 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2050 udelay(AFE_REGISTER_WRITE_DELAY); 2239 udelay(AFE_REGISTER_WRITE_DELAY);
2051 2240
2052 writel(oem_phy->afe_tx_amp_control1, 2241 writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2053 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2054 udelay(AFE_REGISTER_WRITE_DELAY); 2242 udelay(AFE_REGISTER_WRITE_DELAY);
2055 2243
2056 writel(oem_phy->afe_tx_amp_control2, 2244 writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2057 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2058 udelay(AFE_REGISTER_WRITE_DELAY); 2245 udelay(AFE_REGISTER_WRITE_DELAY);
2059 2246
2060 writel(oem_phy->afe_tx_amp_control3, 2247 writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2061 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
2062 udelay(AFE_REGISTER_WRITE_DELAY); 2248 udelay(AFE_REGISTER_WRITE_DELAY);
2063 } 2249 }
2064 2250
2065 /* Transfer control to the PEs */ 2251 /* Transfer control to the PEs */
2066 writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); 2252 writel(0x00010f00, &afe->afe_dfx_master_control0);
2067 udelay(AFE_REGISTER_WRITE_DELAY); 2253 udelay(AFE_REGISTER_WRITE_DELAY);
2068} 2254}
2069 2255
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 646051afd3c..5477f0fa823 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
435 435
436static inline bool is_c0(struct pci_dev *pdev) 436static inline bool is_c0(struct pci_dev *pdev)
437{ 437{
438 if (pdev->revision >= 5) 438 if (pdev->revision == 5)
439 return true; 439 return true;
440 return false; 440 return false;
441} 441}
442 442
443static inline bool is_c1(struct pci_dev *pdev)
444{
445 if (pdev->revision >= 6)
446 return true;
447 return false;
448}
449
450enum cable_selections {
451 short_cable = 0,
452 long_cable = 1,
453 medium_cable = 2,
454 undefined_cable = 3
455};
456
457#define CABLE_OVERRIDE_DISABLED (0x10000)
458
459static inline int is_cable_select_overridden(void)
460{
461 return cable_selection_override < CABLE_OVERRIDE_DISABLED;
462}
463
464enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
465void validate_cable_selections(struct isci_host *ihost);
466char *lookup_cable_names(enum cable_selections);
467
443/* set hw control for 'activity', even though active enclosures seem to drive 468/* set hw control for 'activity', even though active enclosures seem to drive
444 * the activity led on their own. Skip setting FSENG control on 'status' due 469 * the activity led on their own. Skip setting FSENG control on 'status' due
445 * to unexpected operation and 'error' due to not being a supported automatic 470 * to unexpected operation and 'error' due to not being a supported automatic
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index a97edabcb85..17c4c2c89c2 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -65,7 +65,7 @@
65#include "probe_roms.h" 65#include "probe_roms.h"
66 66
67#define MAJ 1 67#define MAJ 1
68#define MIN 0 68#define MIN 1
69#define BUILD 0 69#define BUILD 0
70#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 70#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
71 __stringify(BUILD) 71 __stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
94 94
95/* linux isci specific settings */ 95/* linux isci specific settings */
96 96
97unsigned char no_outbound_task_to = 20; 97unsigned char no_outbound_task_to = 2;
98module_param(no_outbound_task_to, byte, 0); 98module_param(no_outbound_task_to, byte, 0);
99MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); 99MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
100 100
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
114module_param(stp_inactive_to, ushort, 0); 114module_param(stp_inactive_to, ushort, 0);
115MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); 115MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
116 116
117unsigned char phy_gen = 3; 117unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
118module_param(phy_gen, byte, 0); 118module_param(phy_gen, byte, 0);
119MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); 119MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
120 120
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
122module_param(max_concurr_spinup, byte, 0); 122module_param(max_concurr_spinup, byte, 0);
123MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); 123MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
124 124
125uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
126module_param(cable_selection_override, uint, 0);
127
128MODULE_PARM_DESC(cable_selection_override,
129 "This field indicates length of the SAS/SATA cable between "
130 "host and device. If any bits > 15 are set (default) "
131 "indicates \"use platform defaults\"");
132
125static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) 133static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
126{ 134{
127 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); 135 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
412 return NULL; 420 return NULL;
413 isci_host->shost = shost; 421 isci_host->shost = shost;
414 422
423 dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
424 "{%s, %s, %s, %s}\n",
425 (is_cable_select_overridden() ? "* " : ""), isci_host->id,
426 lookup_cable_names(decode_cable_selection(isci_host, 3)),
427 lookup_cable_names(decode_cable_selection(isci_host, 2)),
428 lookup_cable_names(decode_cable_selection(isci_host, 1)),
429 lookup_cable_names(decode_cable_selection(isci_host, 0)));
430
415 err = isci_host_init(isci_host); 431 err = isci_host_init(isci_host);
416 if (err) 432 if (err)
417 goto err_shost; 433 goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
466 orom = isci_request_oprom(pdev); 482 orom = isci_request_oprom(pdev);
467 483
468 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { 484 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
469 if (sci_oem_parameters_validate(&orom->ctrl[i])) { 485 if (sci_oem_parameters_validate(&orom->ctrl[i],
486 orom->hdr.version)) {
470 dev_warn(&pdev->dev, 487 dev_warn(&pdev->dev,
471 "[%d]: invalid oem parameters detected, falling back to firmware\n", i); 488 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
472 devm_kfree(&pdev->dev, orom); 489 devm_kfree(&pdev->dev, orom);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 8efeb6b0832..234ab46fce3 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
480extern u16 stp_inactive_to; 480extern u16 stp_inactive_to;
481extern unsigned char phy_gen; 481extern unsigned char phy_gen;
482extern unsigned char max_concurr_spinup; 482extern unsigned char max_concurr_spinup;
483extern uint cable_selection_override;
483 484
484irqreturn_t isci_msix_isr(int vec, void *data); 485irqreturn_t isci_msix_isr(int vec, void *data);
485irqreturn_t isci_intx_isr(int vec, void *data); 486irqreturn_t isci_intx_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 35f50c2183e..fe18acfd6eb 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
 
 static enum sci_status
 sci_phy_link_layer_initialization(struct isci_phy *iphy,
-				  struct scu_link_layer_registers __iomem *reg)
+				  struct scu_link_layer_registers __iomem *llr)
 {
 	struct isci_host *ihost = iphy->owning_port->owning_controller;
+	struct sci_phy_user_params *phy_user;
+	struct sci_phy_oem_params *phy_oem;
 	int phy_idx = iphy->phy_index;
-	struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
-	struct sci_phy_oem_params *phy_oem =
-		&ihost->oem_parameters.phys[phy_idx];
-	u32 phy_configuration;
 	struct sci_phy_cap phy_cap;
+	u32 phy_configuration;
 	u32 parity_check = 0;
 	u32 parity_count = 0;
 	u32 llctl, link_rate;
 	u32 clksm_value = 0;
 	u32 sp_timeouts = 0;
 
-	iphy->link_layer_registers = reg;
+	phy_user = &ihost->user_parameters.phys[phy_idx];
+	phy_oem = &ihost->oem_parameters.phys[phy_idx];
+	iphy->link_layer_registers = llr;
 
 	/* Set our IDENTIFY frame data */
 	#define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
 	       SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
 	       SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
 	       SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
-	       &iphy->link_layer_registers->transmit_identification);
+	       &llr->transmit_identification);
 
 	/* Write the device SAS Address */
-	writel(0xFEDCBA98,
-	       &iphy->link_layer_registers->sas_device_name_high);
-	writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+	writel(0xFEDCBA98, &llr->sas_device_name_high);
+	writel(phy_idx, &llr->sas_device_name_low);
 
 	/* Write the source SAS Address */
-	writel(phy_oem->sas_address.high,
-	       &iphy->link_layer_registers->source_sas_address_high);
-	writel(phy_oem->sas_address.low,
-	       &iphy->link_layer_registers->source_sas_address_low);
+	writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+	writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
 
 	/* Clear and Set the PHY Identifier */
-	writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
-	writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
-	       &iphy->link_layer_registers->identify_frame_phy_id);
+	writel(0, &llr->identify_frame_phy_id);
+	writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
 
 	/* Change the initial state of the phy configuration register */
-	phy_configuration =
-		readl(&iphy->link_layer_registers->phy_configuration);
+	phy_configuration = readl(&llr->phy_configuration);
 
 	/* Hold OOB state machine in reset */
 	phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
-	writel(phy_configuration,
-	       &iphy->link_layer_registers->phy_configuration);
+	writel(phy_configuration, &llr->phy_configuration);
 
 	/* Configure the SNW capabilities */
 	phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
 	phy_cap.gen3_no_ssc = 1;
 	phy_cap.gen2_no_ssc = 1;
 	phy_cap.gen1_no_ssc = 1;
-	if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+	if (ihost->oem_parameters.controller.do_enable_ssc) {
+		struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+		struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+		struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+		bool en_sas = false;
+		bool en_sata = false;
+		u32 sas_type = 0;
+		u32 sata_spread = 0x2;
+		u32 sas_spread = 0x2;
+
 		phy_cap.gen3_ssc = 1;
 		phy_cap.gen2_ssc = 1;
 		phy_cap.gen1_ssc = 1;
+
+		if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+			en_sas = en_sata = true;
+		else {
+			sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+			sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+			if (sata_spread)
+				en_sata = true;
+
+			if (sas_spread) {
+				en_sas = true;
+				sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+			}
+
+		}
+
+		if (en_sas) {
+			u32 reg;
+
+			reg = readl(&xcvr->afe_xcvr_control0);
+			reg |= (0x00100000 | (sas_type << 19));
+			writel(reg, &xcvr->afe_xcvr_control0);
+
+			reg = readl(&xcvr->afe_tx_ssc_control);
+			reg |= sas_spread << 8;
+			writel(reg, &xcvr->afe_tx_ssc_control);
+		}
+
+		if (en_sata) {
+			u32 reg;
+
+			reg = readl(&xcvr->afe_tx_ssc_control);
+			reg |= sata_spread;
+			writel(reg, &xcvr->afe_tx_ssc_control);
+
+			reg = readl(&llr->stp_control);
+			reg |= 1 << 12;
+			writel(reg, &llr->stp_control);
+		}
 	}
 
-	/*
-	 * The SAS specification indicates that the phy_capabilities that
-	 * are transmitted shall have an even parity. Calculate the parity. */
+	/* The SAS specification indicates that the phy_capabilities that
+	 * are transmitted shall have an even parity. Calculate the parity.
+	 */
 	parity_check = phy_cap.all;
 	while (parity_check != 0) {
 		if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
 		parity_check >>= 1;
 	}
 
-	/*
-	 * If parity indicates there are an odd number of bits set, then
-	 * set the parity bit to 1 in the phy capabilities. */
+	/* If parity indicates there are an odd number of bits set, then
+	 * set the parity bit to 1 in the phy capabilities.
	 */
 	if ((parity_count % 2) != 0)
 		phy_cap.parity = 1;
 
-	writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+	writel(phy_cap.all, &llr->phy_capabilities);
 
 	/* Set the enable spinup period but disable the ability to send
 	 * notify enable spinup
 	 */
 	writel(SCU_ENSPINUP_GEN_VAL(COUNT,
 			phy_user->notify_enable_spin_up_insertion_frequency),
-	       &iphy->link_layer_registers->notify_enable_spinup_control);
+	       &llr->notify_enable_spinup_control);
 
 	/* Write the ALIGN Insertion Ferequency for connected phy and
 	 * inpendent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
 	clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
 			phy_user->align_insertion_frequency);
 
-	writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+	writel(clksm_value, &llr->clock_skew_management);
 
-	/* @todo Provide a way to write this register correctly */
-	writel(0x02108421,
-	       &iphy->link_layer_registers->afe_lookup_table_control);
+	if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+		writel(0x04210400, &llr->afe_lookup_table_control);
+		writel(0x020A7C05, &llr->sas_primitive_timeout);
+	} else
+		writel(0x02108421, &llr->afe_lookup_table_control);
 
 	llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
 		(u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
 		break;
 	}
 	llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
-	writel(llctl, &iphy->link_layer_registers->link_layer_control);
+	writel(llctl, &llr->link_layer_control);
 
-	sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+	sp_timeouts = readl(&llr->sas_phy_timeouts);
 
 	/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
 	sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
 	 */
 	sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
 
-	writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+	writel(sp_timeouts, &llr->sas_phy_timeouts);
 
 	if (is_a2(ihost->pdev)) {
-		/* Program the max ARB time for the PHY to 700us so we inter-operate with
-		 * the PMC expander which shuts down PHYs if the expander PHY generates too
-		 * many breaks. This time value will guarantee that the initiator PHY will
-		 * generate the break.
+		/* Program the max ARB time for the PHY to 700us so we
+		 * inter-operate with the PMC expander which shuts down
+		 * PHYs if the expander PHY generates too many breaks.
+		 * This time value will guarantee that the initiator PHY
+		 * will generate the break.
 		 */
 		writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
-		       &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+		       &llr->maximum_arbitration_wait_timer_timeout);
 	}
 
-	/* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
-	writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+	/* Disable link layer hang detection, rely on the OS timeout for
+	 * I/O timeouts.
+	 */
+	writel(0, &llr->link_layer_hang_detection_timeout);
 
 	/* We can exit the initial state to the stopped state */
 	sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
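The parity loop above implements the even-parity rule from the SAS spec: count the 1-bits in phy_cap.all and, if the count is odd, set the parity bit so the transmitted word carries an even number of 1-bits. A stand-alone sketch of the same rule (the parity bit is assumed to be bit 0 here; in the driver it is the parity bitfield of struct sci_phy_cap):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int popcount32(uint32_t v)
    {
            unsigned int count = 0;

            while (v) {             /* same walk as the parity_check loop */
                    count += v & 1;
                    v >>= 1;
            }
            return count;
    }

    int main(void)
    {
            uint32_t phy_cap = 0x0000000E;  /* 3 bits set: odd */

            if (popcount32(phy_cap) % 2)
                    phy_cap |= 1;   /* assumed parity bit: now 4 bits, even */
            printf("0x%08X has %u bits set\n", phy_cap, popcount32(phy_cap));
            return 0;
    }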
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
 	writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
 }
 
-/**
- *
- *
- * This method will start the OOB/SN state machine for this struct isci_phy object.
- */
-static void scu_link_layer_start_oob(
-	struct isci_phy *iphy)
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
 {
-	u32 scu_sas_pcfg_value;
-
-	scu_sas_pcfg_value =
-		readl(&iphy->link_layer_registers->phy_configuration);
-	scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
-	scu_sas_pcfg_value &=
-		~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
-		SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
-	writel(scu_sas_pcfg_value,
-	       &iphy->link_layer_registers->phy_configuration);
+	struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+	u32 val;
+
+	/** Reset OOB sequence - start */
+	val = readl(&ll->phy_configuration);
+	val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+		 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+	writel(val, &ll->phy_configuration);
+	readl(&ll->phy_configuration); /* flush */
+	/** Reset OOB sequence - end */
+
+	/** Start OOB sequence - start */
+	val = readl(&ll->phy_configuration);
+	val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+	writel(val, &ll->phy_configuration);
+	readl(&ll->phy_configuration); /* flush */
+	/** Start OOB sequence - end */
 }
 
 /**
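The rewritten scu_link_layer_start_oob() above splits the old single read-modify-write into two steps (deassert the resets, then set OOB_ENABLE) and reads the register back after each write so the posted MMIO write is flushed before the next step. A hedged sketch of that flush pattern (the register and bit names are placeholders, not this driver's):

    #include <linux/io.h>
    #include <linux/types.h>

    static void start_engine(void __iomem *cfg, u32 reset_bits, u32 enable_bit)
    {
            u32 val;

            val = readl(cfg);
            val &= ~reset_bits;     /* step 1: take the block out of reset */
            writel(val, cfg);
            readl(cfg);             /* read back to flush the posted write */

            val = readl(cfg);
            val |= enable_bit;      /* step 2: only now enable it */
            writel(val, cfg);
            readl(cfg);             /* flush again before returning */
    }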
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index ac7f27749f9..7c6ac58a5c4 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
  * value is returned if the specified port is not valid. When this value is
  * returned, no data is copied to the properties output parameter.
  */
-static enum sci_status sci_port_get_properties(struct isci_port *iport,
+enum sci_status sci_port_get_properties(struct isci_port *iport,
 					struct sci_port_properties *prop)
 {
 	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
 	}
 }
 
-static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
-				  bool do_notify_user)
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+	sci_phy_resume(iphy);
+	iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+				  struct isci_phy *iphy,
+				  u8 flags)
 {
 	struct isci_host *ihost = iport->owning_controller;
 
-	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
 		sci_phy_resume(iphy);
 
 	iport->active_phy_mask |= 1 << iphy->phy_index;
 
 	sci_controller_clear_invalid_phy(ihost, iphy);
 
-	if (do_notify_user == true)
+	if (flags & PF_NOTIFY)
 		isci_port_link_up(ihost, iport, iphy);
 }
 
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
 	struct isci_host *ihost = iport->owning_controller;
 
 	iport->active_phy_mask &= ~(1 << iphy->phy_index);
+	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
 	if (!iport->active_phy_mask)
 		iport->last_active_phy = iphy->phy_index;
 
 	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
 
-	/* Re-assign the phy back to the LP as if it were a narrow port */
-	writel(iphy->phy_index,
-	       &iport->port_pe_configuration_register[iphy->phy_index]);
+	/* Re-assign the phy back to the LP as if it were a narrow port for APC
+	 * mode. For MPC mode, the phy will remain in the port.
+	 */
+	if (iport->owning_controller->oem_parameters.controller.mode_type ==
+	    SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+		writel(iphy->phy_index,
+		       &iport->port_pe_configuration_register[iphy->phy_index]);
 
 	if (do_notify_user == true)
 		isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
  * sci_port_general_link_up_handler - phy can be assigned to port?
  * @sci_port: sci_port object for which has a phy that has gone link up.
  * @sci_phy: This is the struct isci_phy object that has gone link up.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- * sci_port_link_up()) as to the fact that a new phy as become ready.
+ * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
  *
- * Determine if this phy can be assigned to this
- * port . If the phy is not a valid PHY for
- * this port then the function will notify the user. A PHY can only be
- * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in
- * the same port. none
+ * Determine if this phy can be assigned to this port . If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if it's attached SAS ADDRESS is the
+ * same as all other PHYs in the same port.
  */
 static void sci_port_general_link_up_handler(struct isci_port *iport,
 					     struct isci_phy *iphy,
-					     bool do_notify_user)
+					     u8 flags)
 {
 	struct sci_sas_address port_sas_address;
 	struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
 	    iport->active_phy_mask == 0) {
 		struct sci_base_state_machine *sm = &iport->sm;
 
-		sci_port_activate_phy(iport, iphy, do_notify_user);
+		sci_port_activate_phy(iport, iphy, flags);
 		if (sm->current_state_id == SCI_PORT_RESETTING)
 			port_state_machine_change(iport, SCI_PORT_READY);
 	} else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
 	struct isci_phy *iphy)
 {
 	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
-	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
-	    sci_port_is_wide(iport)) {
-		sci_port_invalid_link_up(iport, iphy);
-
-		return false;
+	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+		if (sci_port_is_wide(iport)) {
+			sci_port_invalid_link_up(iport, iphy);
+			return false;
+		} else {
+			struct isci_host *ihost = iport->owning_controller;
+			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+			writel(iphy->phy_index,
+			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
+		}
 	}
 
 	return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
 	}
 }
 
+static void scic_sds_port_ready_substate_waiting_exit(
+	struct sci_base_state_machine *sm)
+{
+	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+	sci_port_resume_port_task_scheduler(iport);
+}
+
 static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
 {
 	u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
 			writel(iport->physical_port_index,
 			       &iport->port_pe_configuration_register[
 					iport->phy_table[index]->phy_index]);
+			if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
+				sci_port_resume_phy(iport, iport->phy_table[index]);
 		}
 	}
 
 	sci_port_update_viit_entry(iport);
 
-	sci_port_resume_port_task_scheduler(iport);
-
 	/*
 	 * Post the dummy task for the port so the hardware can schedule
 	 * io correctly
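The XOR above picks out phys that are active but not yet enabled (enabled_phy_mask appears to be maintained as a subset of active_phy_mask by the surrounding code), so only the deferred phys get resumed when the port goes operational. A small sketch of the mask arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t active  = 0x0F; /* phys 0-3 are port members */
            uint8_t enabled = 0x05; /* phys 0 and 2 already resumed */
            unsigned int i;

            for (i = 0; i < 4; i++)
                    if ((active ^ enabled) & (1u << i))
                            printf("resume phy %u\n", i);   /* phys 1 and 3 */
            return 0;
    }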
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
 	if (iport->active_phy_mask == 0) {
 		isci_port_not_ready(ihost, iport);
 
-		port_state_machine_change(iport,
-					  SCI_PORT_SUB_WAITING);
-	} else if (iport->started_request_count == 0)
-		port_state_machine_change(iport,
-					  SCI_PORT_SUB_OPERATIONAL);
-}
-
-static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
-{
-	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
-
-	sci_port_suspend_port_task_scheduler(iport);
-	if (iport->ready_exit)
-		sci_port_invalidate_dummy_remote_node(iport);
+		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+	} else
+		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
 }
 
 enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 	if (status != SCI_SUCCESS)
 		return status;
 
-	sci_port_general_link_up_handler(iport, iphy, true);
+	sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
 	iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
 	port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
 
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 
 	if (status != SCI_SUCCESS)
 		return status;
-	sci_port_general_link_up_handler(iport, iphy, true);
+	sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
 
 	/* Re-enter the configuring state since this may be the last phy in
 	 * the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
 		/* Since this is the first phy going link up for the port we
 		 * can just enable it and continue
 		 */
-		sci_port_activate_phy(iport, iphy, true);
+		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
 
 		port_state_machine_change(iport,
 					  SCI_PORT_SUB_OPERATIONAL);
 		return SCI_SUCCESS;
 	case SCI_PORT_SUB_OPERATIONAL:
-		sci_port_general_link_up_handler(iport, iphy, true);
+		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
 		return SCI_SUCCESS;
 	case SCI_PORT_RESETTING:
 		/* TODO We should make sure that the phy that has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
 		/* In the resetting state we don't notify the user regarding
 		 * link up and link down notifications.
 		 */
-		sci_port_general_link_up_handler(iport, iphy, false);
+		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
 		return SCI_SUCCESS;
 	default:
 		dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
 	},
 	[SCI_PORT_SUB_WAITING] = {
 		.enter_state = sci_port_ready_substate_waiting_enter,
+		.exit_state  = scic_sds_port_ready_substate_waiting_exit,
 	},
 	[SCI_PORT_SUB_OPERATIONAL] = {
 		.enter_state = sci_port_ready_substate_operational_enter,
 		.exit_state  = sci_port_ready_substate_operational_exit
 	},
 	[SCI_PORT_SUB_CONFIGURING] = {
-		.enter_state = sci_port_ready_substate_configuring_enter,
-		.exit_state  = sci_port_ready_substate_configuring_exit
+		.enter_state = sci_port_ready_substate_configuring_enter
 	},
 	[SCI_PORT_RESETTING] = {
 		.exit_state = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
 	iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
 	iport->physical_port_index = index;
 	iport->active_phy_mask = 0;
+	iport->enabled_phy_mask = 0;
 	iport->last_active_phy = 0;
 	iport->ready_exit = false;
 
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index cb5ffbc3860..08116090eb7 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -63,6 +63,9 @@
 
 #define SCIC_SDS_DUMMY_PORT	0xFF
 
+#define PF_NOTIFY	(1 << 0)
+#define PF_RESUME	(1 << 1)
+
 struct isci_phy;
 struct isci_host;
 
@@ -83,6 +86,8 @@ enum isci_status {
  * @logical_port_index: software port index
  * @physical_port_index: hardware port index
  * @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: phy mask for the port
+ * that are already part of the port
  * @reserved_tag:
  * @reserved_rni: reserver for port task scheduler workaround
  * @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
 	u8 logical_port_index;
 	u8 physical_port_index;
 	u8 active_phy_mask;
+	u8 enabled_phy_mask;
 	u8 last_active_phy;
 	u16 reserved_rni;
 	u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
 	struct isci_port *iport,
 	struct isci_phy *iphy);
 
+enum sci_status sci_port_get_properties(
+	struct isci_port *iport,
+	struct sci_port_properties *prop);
+
 enum sci_status sci_port_link_up(struct isci_port *iport,
 				 struct isci_phy *iphy);
 enum sci_status sci_port_link_down(struct isci_port *iport,
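PF_NOTIFY and PF_RESUME above replace the old single do_notify_user bool, so each call site can request resuming the phy and notifying libsas independently. A minimal user-space sketch of the calling convention (printf stands in for the driver's sci_phy_resume()/isci_port_link_up() calls):

    #include <stdint.h>
    #include <stdio.h>

    #define PF_NOTIFY (1 << 0)
    #define PF_RESUME (1 << 1)

    static void activate_phy(uint8_t flags)
    {
            if (flags & PF_RESUME)
                    printf("resume phy\n");
            if (flags & PF_NOTIFY)
                    printf("notify link up\n");
    }

    int main(void)
    {
            activate_phy(PF_NOTIFY | PF_RESUME);    /* normal link up */
            activate_phy(PF_RESUME);                /* resetting: resume silently */
            return 0;
    }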
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 38a99d28114..6d1e9544cbe 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
 
 #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT	(10)
 #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT	(10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION	(100)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION	(250)
 
 enum SCIC_SDS_APC_ACTIVITY {
 	SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
 	return sci_port_configuration_agent_validate_ports(ihost, port_agent);
 }
 
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we can not yet tell to which a phy
+ * belongs.
+ */
+static void sci_apc_agent_start_timer(
+	struct sci_port_configuration_agent *port_agent,
+	u32 timeout)
+{
+	if (port_agent->timer_pending)
+		sci_del_timer(&port_agent->timer);
+
+	port_agent->timer_pending = true;
+	sci_mod_timer(&port_agent->timer, timeout);
+}
+
 static void sci_apc_agent_configure_ports(struct isci_host *ihost,
 					  struct sci_port_configuration_agent *port_agent,
 					  struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
 		break;
 
 	case SCIC_SDS_APC_START_TIMER:
-		/*
-		 * This can occur for either a link down event, or a link
-		 * up event where we cannot yet tell the port to which a
-		 * phy belongs.
-		 */
-		if (port_agent->timer_pending)
-			sci_del_timer(&port_agent->timer);
-
-		port_agent->timer_pending = true;
-		sci_mod_timer(&port_agent->timer,
-			      SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+		sci_apc_agent_start_timer(port_agent,
+					  SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
 		break;
 
 	case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
 	if (!iport) {
 		/* the phy is not the part of this port */
 		port_agent->phy_ready_mask |= 1 << phy_index;
-		sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+		sci_apc_agent_start_timer(port_agent,
+					  SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
 	} else {
 		/* the phy is already the part of the port */
 		u32 port_state = iport->sm.current_state_id;
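The sci_apc_agent_start_timer() helper factored out above makes the restart idempotent: a pending timer is cancelled before the timeout is re-armed, so a burst of link events within the window simply pushes the deadline out. With raw kernel timers the same shape looks roughly like this (the struct is a stand-in, not the driver's):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct apc_agent {                      /* hypothetical agent */
            struct timer_list timer;
            bool timer_pending;
    };

    static void apc_restart_timer(struct apc_agent *agent, unsigned int ms)
    {
            if (agent->timer_pending)       /* cancel, then re-arm */
                    del_timer(&agent->timer);

            agent->timer_pending = true;
            mod_timer(&agent->timer, jiffies + msecs_to_jiffies(ms));
    }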
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index b5f4341de24..9b8117b9d75 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
 
 	memcpy(orom, fw->data, fw->size);
 
-	if (is_c0(pdev))
+	if (is_c0(pdev) || is_c1(pdev))
 		goto out;
 
 	/*
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index 2c75248ca32..bb0e9d4d97c 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -152,7 +152,7 @@ struct sci_user_parameters {
 #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
 
 struct sci_oem_params;
-int sci_oem_parameters_validate(struct sci_oem_params *oem);
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
 
 struct isci_orom;
 struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
 			      0x1a, 0x04, 0xc6)
 #define ISCI_EFI_VAR_NAME	"RstScuO"
 
+#define ISCI_ROM_VER_1_0	0x10
+#define ISCI_ROM_VER_1_1	0x11
+#define ISCI_ROM_VER_1_3	0x13
+#define ISCI_ROM_VER_LATEST	ISCI_ROM_VER_1_3
+
 /* Allowed PORT configuration modes APC Automatic PORT configuration mode is
  * defined by the OEM configuration parameters providing no PHY_MASK parameters
  * for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
 	struct {
 		uint8_t mode_type;
 		uint8_t max_concurr_spin_up;
-		uint8_t do_enable_ssc;
-		uint8_t reserved;
+		/*
+		 * This bitfield indicates the OEM's desired default Tx
+		 * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+		 * NOTE: Default SSC Modulation Frequency is 31.5KHz.
+		 */
+		union {
+			struct {
+				/*
+				 * NOTE: Max spread for SATA is +0 / -5000 PPM.
+				 * Down-spreading SSC (only method allowed for SATA):
+				 *  SATA SSC Tx Disabled = 0x0
+				 *  SATA SSC Tx at +0 / -1419 PPM Spread = 0x2
+				 *  SATA SSC Tx at +0 / -2129 PPM Spread = 0x3
+				 *  SATA SSC Tx at +0 / -4257 PPM Spread = 0x6
+				 *  SATA SSC Tx at +0 / -4967 PPM Spread = 0x7
+				 */
+				uint8_t ssc_sata_tx_spread_level:4;
+				/*
+				 * SAS SSC Tx Disabled = 0x0
+				 *
+				 * NOTE: Max spread for SAS down-spreading +0 /
+				 * -2300 PPM
+				 * Down-spreading SSC:
+				 *  SAS SSC Tx at +0 / -1419 PPM Spread = 0x2
+				 *  SAS SSC Tx at +0 / -2129 PPM Spread = 0x3
+				 *
+				 * NOTE: Max spread for SAS center-spreading +2300 /
+				 * -2300 PPM
+				 * Center-spreading SSC:
+				 *  SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3
+				 *  SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6
+				 */
+				uint8_t ssc_sas_tx_spread_level:3;
+				/*
+				 * NOTE: Refer to the SSC section of the SAS 2.x
+				 * Specification for proper setting of this field.
+				 * For standard SAS Initiator SAS PHY operation it
+				 * should be 0 for Down-spreading.
+				 * SAS SSC Tx spread type:
+				 *  Down-spreading SSC = 0
+				 *  Center-spreading SSC = 1
+				 */
+				uint8_t ssc_sas_tx_type:1;
+			};
+			uint8_t do_enable_ssc;
+		};
+		/*
+		 * This field indicates length of the SAS/SATA cable between
+		 * host and device.
+		 * This field is used make relationship between analog
+		 * parameters of the phy in the silicon and length of the cable.
+		 * Supported cable attenuation levels:
+		 * "short"- up to 3m, "medium"-3m to 6m, and "long"- more than
+		 * 6m.
+		 *
+		 * This is bit mask field:
+		 *
+		 * BIT:      (MSB) 7     6     5     4
+		 * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Medium cable
+		 *                                           length assignment
+		 * BIT:            3     2     1     0  (LSB)
+		 * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Long cable length
+		 *                                           assignment
+		 *
+		 * BITS 7-4 are set when the cable length is assigned to medium
+		 * BITS 3-0 are set when the cable length is assigned to long
+		 *
+		 * The BIT positions are clear when the cable length is
+		 * assigned to short.
+		 *
+		 * Setting the bits for both long and medium cable length is
+		 * undefined.
+		 *
+		 * A value of 0x84 would assign
+		 *    phy3 - medium
+		 *    phy2 - long
+		 *    phy1 - short
+		 *    phy0 - short
+		 */
+		uint8_t cable_selection_mask;
 	} controller;
 
 	struct {
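The cable_selection_mask layout documented above decodes per phy as: medium if bit (phy + 4) is set, long if bit phy is set, short if both are clear, and undefined if both are set. A stand-alone sketch that reproduces the 0x84 example from the comment:

    #include <stdint.h>
    #include <stdio.h>

    static const char *cable_length(uint8_t mask, unsigned int phy)
    {
            int medium = (mask >> (phy + 4)) & 1;   /* bits 7-4 */
            int longc  = (mask >> phy) & 1;         /* bits 3-0 */

            if (medium && longc)
                    return "undefined";             /* both set is undefined */
            if (medium)
                    return "medium";
            if (longc)
                    return "long";
            return "short";
    }

    int main(void)
    {
            unsigned int phy;

            /* 0x84 -> phy3 medium, phy2 long, phy1/phy0 short */
            for (phy = 0; phy < 4; phy++)
                    printf("phy%u - %s\n", phy, cable_length(0x84, phy));
            return 0;
    }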
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b207cd3b15a..dd74b6ceeb8 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -53,6 +53,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <scsi/sas.h>
+#include <linux/bitops.h>
 #include "isci.h"
 #include "port.h"
 #include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
 				      struct isci_remote_device *idev)
 {
 	enum sci_status status;
+	struct sci_port_properties properties;
 	struct domain_device *dev = idev->domain_dev;
 
 	sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
 	 * entries will be needed to store the remote node.
 	 */
 	idev->is_direct_attached = true;
+
+	sci_port_get_properties(iport, &properties);
+	/* Get accurate port width from port's phy mask for a DA device. */
+	idev->device_port_width = hweight32(properties.phy_mask);
+
 	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
 							     idev,
 							     &idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
 
 	idev->connection_rate = sci_port_get_max_allowed_speed(iport);
 
-	/* / @todo Should I assign the port width by reading all of the phys on the port? */
-	idev->device_port_width = 1;
-
 	return SCI_SUCCESS;
 }
 
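hweight32() above is a population count: the direct-attached port width becomes the number of bits set in the port's phy mask, replacing the old hard-coded width of 1. For illustration (the mask value is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* user-space stand-in for the kernel's hweight32() */
    static unsigned int hweight32_sketch(uint32_t w)
    {
            unsigned int n = 0;

            for (; w; w >>= 1)
                    n += w & 1;
            return n;
    }

    int main(void)
    {
            uint32_t phy_mask = 0x0000000F; /* say phys 0-3 form the port */

            printf("device_port_width = %u\n", hweight32_sketch(phy_mask));
            return 0;
    }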
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 66ad3dc8949..f5a3f7d2bda 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 		}
 	}
 
-	isci_print_tmf(tmf);
+	isci_print_tmf(ihost, tmf);
 
 	if (tmf->status == SCI_SUCCESS)
 		ret = TMF_RESP_FUNC_COMPLETE;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index bc78c0a41d5..1b27b3797c6 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -106,7 +106,6 @@ struct isci_tmf {
 	} resp;
 	unsigned char lun[8];
 	u16 io_tag;
-	struct isci_remote_device *device;
 	enum isci_tmf_function_codes tmf_code;
 	int status;
 
@@ -120,10 +119,10 @@ struct isci_tmf {
 
 };
 
-static inline void isci_print_tmf(struct isci_tmf *tmf)
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
 {
 	if (SAS_PROTOCOL_SATA == tmf->proto)
-		dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+		dev_dbg(&ihost->pdev->dev,
 			"%s: status = %x\n"
 			"tmf->resp.d2h_fis.status = %x\n"
 			"tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
 			tmf->resp.d2h_fis.status,
 			tmf->resp.d2h_fis.error);
 	else
-		dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+		dev_dbg(&ihost->pdev->dev,
 			"%s: status = %x\n"
 			"tmf->resp.resp_iu.data_present = %x\n"
 			"tmf->resp.resp_iu.status = %x\n",
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 7269e928824..1d1b0c9da29 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
  * Locking Note: This function expects that the lport mutex is locked before
  * calling it.
  */
-void fc_disc_stop_rports(struct fc_disc *disc)
+static void fc_disc_stop_rports(struct fc_disc *disc)
 {
 	struct fc_lport *lport;
 	struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
  * fc_disc_stop() - Stop discovery for a given lport
  * @lport: The local port that discovery should stop on
  */
-void fc_disc_stop(struct fc_lport *lport)
+static void fc_disc_stop(struct fc_lport *lport)
 {
 	struct fc_disc *disc = &lport->disc;
 
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
  * This function will block until discovery has been
  * completely stopped and all rports have been deleted.
  */
-void fc_disc_stop_final(struct fc_lport *lport)
+static void fc_disc_stop_final(struct fc_lport *lport)
 {
 	fc_disc_stop(lport);
 	lport->tt.rport_flush_queue();
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index fb9161dc4ca..e17a28d324d 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,6 +28,7 @@
 #include <scsi/fc/fc_els.h>
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
+#include "fc_libfc.h"
 
 /**
  * fc_elsct_send() - Send an ELS or CT frame
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 9de9db27e87..4d70d96fa5d 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -91,7 +91,7 @@ struct fc_exch_pool {
  * It manages the allocation of exchange IDs.
  */
 struct fc_exch_mgr {
-	struct fc_exch_pool *pool;
+	struct fc_exch_pool __percpu *pool;
 	mempool_t	*ep_pool;
 	enum fc_class	class;
 	struct kref	kref;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 221875ec3d7..f607314810a 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
 		fsp->xfer_ddp = FC_XID_UNKNOWN;
 		atomic_set(&fsp->ref_cnt, 1);
 		init_timer(&fsp->timer);
+		fsp->timer.data = (unsigned long)fsp;
 		INIT_LIST_HEAD(&fsp->list);
 		spin_lock_init(&fsp->scsi_pkt_lock);
 	}
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 	}
 	put_cpu();
 
-	init_timer(&fsp->timer);
-	fsp->timer.data = (unsigned long)fsp;
-
 	/*
 	 * send it to the lower layer
 	 * if we get -1 return then put the request in the pending
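The fc_fcp.c change above binds the timer's data pointer once, in fc_fcp_pkt_alloc(), instead of re-running init_timer() on a live packet in the queuecommand path. With the timer API of this kernel generation the alloc-time pattern looks like this sketch (struct and names trimmed):

    #include <linux/timer.h>
    #include <linux/slab.h>

    struct pkt {                            /* stand-in for fc_fcp_pkt */
            struct timer_list timer;
    };

    static void pkt_timeout(unsigned long data)
    {
            struct pkt *p = (struct pkt *)data;

            (void)p;                        /* handle the timeout for p */
    }

    static struct pkt *pkt_alloc(gfp_t gfp)
    {
            struct pkt *p = kzalloc(sizeof(*p), gfp);

            if (p) {
                    init_timer(&p->timer);
                    p->timer.data = (unsigned long)p;  /* bind once, at alloc */
                    p->timer.function = pkt_timeout;
            }
            return p;
    }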
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e77094a587e..83750ebb527 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
  * @lport: The local port receiving the event
  * @event: The discovery event
  */
-void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+static void fc_lport_disc_callback(struct fc_lport *lport,
+				   enum fc_disc_event event)
 {
 	switch (event) {
 	case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-void fc_lport_enter_flogi(struct fc_lport *lport)
+static void fc_lport_enter_flogi(struct fc_lport *lport)
 {
 	struct fc_frame *fp;
 
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9e434844a6..83aa1efec87 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
  * If it appears we are already logged in, ADISC is used to verify
  * the setup.
  */
-int fc_rport_login(struct fc_rport_priv *rdata)
+static int fc_rport_login(struct fc_rport_priv *rdata)
 {
 	mutex_lock(&rdata->rp_mutex);
 
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
  */
-int fc_rport_logoff(struct fc_rport_priv *rdata)
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
 	mutex_lock(&rdata->rp_mutex);
 
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
  * @fp: The FLOGI response frame
  * @rp_arg: The remote port that received the FLOGI response
  */
-void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
-			 void *rp_arg)
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+				void *rp_arg)
 {
 	struct fc_rport_priv *rdata = rp_arg;
 	struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ reject:
  *
  * Locking Note: Called with the lport lock held.
  */
-void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_seq_els_data els_data;
 
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 5c1776406c9..15eefa1d61f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
 	adapter->host->sg_tablesize = adapter->sglen;
 
 
-	/* use HP firmware and bios version encoding */
+	/* use HP firmware and bios version encoding
	   Note: fw_version[0|1] and bios_version[0|1] were originally shifted
	   right 8 bits making them zero. This 0 value was hardcoded to fix
	   sparse warnings. */
 	if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
 		sprintf (adapter->fw_version, "%c%d%d.%d%d",
 			 adapter->product_info.fw_version[2],
-			 adapter->product_info.fw_version[1] >> 8,
+			 0,
 			 adapter->product_info.fw_version[1] & 0x0f,
-			 adapter->product_info.fw_version[0] >> 8,
+			 0,
 			 adapter->product_info.fw_version[0] & 0x0f);
 		sprintf (adapter->bios_version, "%c%d%d.%d%d",
 			 adapter->product_info.bios_version[2],
-			 adapter->product_info.bios_version[1] >> 8,
+			 0,
 			 adapter->product_info.bios_version[1] & 0x0f,
-			 adapter->product_info.bios_version[0] >> 8,
+			 0,
 			 adapter->product_info.bios_version[0] & 0x0f);
 	} else {
 		memcpy(adapter->fw_version,
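The hard-coded zeros above follow from the comment: fw_version[] and bios_version[] are byte arrays, and a byte shifted right by 8 is always 0 (the u8 promotes to int, whose low 8 bits hold the whole value), so the old expressions were constant zero and only tripped sparse. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned char b = 0xA5;

            printf("%d\n", b >> 8); /* prints 0 for any byte value */
            return 0;
    }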
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index dd94c7d574f..e5f416f8042 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION		"00.00.06.12-rc1"
-#define MEGASAS_RELDATE		"Oct. 5, 2011"
-#define MEGASAS_EXT_VERSION	"Wed. Oct. 5 17:00:00 PDT 2011"
+#define MEGASAS_VERSION		"00.00.06.14-rc1"
+#define MEGASAS_RELDATE		"Jan. 6, 2012"
+#define MEGASAS_EXT_VERSION	"Fri. Jan. 6 17:00:00 PDT 2012"
 
 /*
  * Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
 
 #define MFI_OB_INTR_STATUS_MASK		0x00000002
 #define MFI_POLL_TIMEOUT_SECS		60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL	(HZ/10)
 
 #define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000
 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
 	u32 mfiStatus;
 	u32 last_seq_num;
 
-	struct timer_list io_completion_timer;
 	struct list_head internal_reset_pending_q;
 
 	/* Ptr to hba specific information */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 29a994f9c4f..8b300be4428 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v00.00.06.12-rc1
+ *  Version : v00.00.06.14-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -59,14 +59,6 @@
 #include "megaraid_sas.h"
 
 /*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
-	"Complete cmds from IO path, (default=0)");
-
-/*
  * Number of sectors per IO command
  * Will be set in megasas_init_mfi if user does not provide
  */
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
 
 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
 				      cmd->frame_count-1, instance->reg_set);
-	/*
-	 * Check if we have pend cmds to be completed
-	 */
-	if (poll_mode_io && atomic_read(&instance->fw_outstanding))
-		tasklet_schedule(&instance->isr_tasklet);
 
 	return 0;
 out_return_cmd:
@@ -3370,47 +3357,6 @@ fail_fw_init:
 	return -EINVAL;
 }
 
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance:		Adapter soft state
- * @timer:		timer object to be initialized
- * @fn:			timer function
- * @interval:		time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
-			struct timer_list *timer,
-			void *fn, unsigned long interval)
-{
-	init_timer(timer);
-	timer->expires = jiffies + interval;
-	timer->data = (unsigned long)instance;
-	timer->function = fn;
-	add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr:	Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
-	struct megasas_instance *instance =
-			(struct megasas_instance *)instance_addr;
-
-	if (atomic_read(&instance->fw_outstanding))
-		tasklet_schedule(&instance->isr_tasklet);
-
-	/* Restart timer */
-	if (poll_mode_io)
-		mod_timer(&instance->io_completion_timer,
-			jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
 static u32
 megasas_init_adapter_mfi(struct megasas_instance *instance)
 {
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
 		(unsigned long)instance);
 
-	/* Initialize the cmd completion timer */
-	if (poll_mode_io)
-		megasas_start_timer(instance, &instance->io_completion_timer,
-				megasas_io_completion_timer,
-				MEGASAS_COMPLETION_TIMER_INTERVAL);
 	return 0;
 
 fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
 	host = instance->host;
 	instance->unload = 1;
 
-	if (poll_mode_io)
-		del_timer_sync(&instance->io_completion_timer);
-
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
 
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
 	}
 
 	instance->instancet->enable_intr(instance->reg_set);
-
-	/* Initialize the cmd completion timer */
-	if (poll_mode_io)
-		megasas_start_timer(instance, &instance->io_completion_timer,
-				megasas_io_completion_timer,
-				MEGASAS_COMPLETION_TIMER_INTERVAL);
 	instance->unload = 0;
 
 	/*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
 	host = instance->host;
 	fusion = instance->ctrl_context;
 
-	if (poll_mode_io)
-		del_timer_sync(&instance->io_completion_timer);
-
 	scsi_remove_host(instance->host);
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
 	cmd->frame->hdr.context = cmd->index;
 	cmd->frame->hdr.pad_0 = 0;
+	cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+				   MFI_FRAME_SENSE64);
 
 	/*
 	 * The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
 		   megasas_sysfs_set_dbg_lvl);
 
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
-	return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
-			       const char *buf, size_t count)
-{
-	int retval = count;
-	int tmp = poll_mode_io;
-	int i;
-	struct megasas_instance *instance;
-
-	if (sscanf(buf, "%u", &poll_mode_io) < 1) {
-		printk(KERN_ERR "megasas: could not set poll_mode_io\n");
-		retval = -EINVAL;
-	}
-
-	/*
-	 * Check if poll_mode_io is already set or is same as previous value
-	 */
-	if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
-		goto out;
-
-	if (poll_mode_io) {
-		/*
-		 * Start timers for all adapters
-		 */
-		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-			instance = megasas_mgmt_info.instance[i];
-			if (instance) {
-				megasas_start_timer(instance,
-					&instance->io_completion_timer,
-					megasas_io_completion_timer,
-					MEGASAS_COMPLETION_TIMER_INTERVAL);
-			}
-		}
-	} else {
-		/*
-		 * Delete timers for all adapters
-		 */
-		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-			instance = megasas_mgmt_info.instance[i];
-			if (instance)
-				del_timer_sync(&instance->io_completion_timer);
-		}
-	}
-
-out:
-	return retval;
-}
-
 static void
 megasas_aen_polling(struct work_struct *work)
 {
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
5502 kfree(ev); 5379 kfree(ev);
5503} 5380}
5504 5381
5505
5506static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
5507 megasas_sysfs_show_poll_mode_io,
5508 megasas_sysfs_set_poll_mode_io);
5509
5510/** 5382/**
5511 * megasas_init - Driver load entry point 5383 * megasas_init - Driver load entry point
5512 */ 5384 */
@@ -5566,11 +5438,6 @@ static int __init megasas_init(void)
5566 if (rval) 5438 if (rval)
5567 goto err_dcf_dbg_lvl; 5439 goto err_dcf_dbg_lvl;
5568 rval = driver_create_file(&megasas_pci_driver.driver, 5440 rval = driver_create_file(&megasas_pci_driver.driver,
5569 &driver_attr_poll_mode_io);
5570 if (rval)
5571 goto err_dcf_poll_mode_io;
5572
5573 rval = driver_create_file(&megasas_pci_driver.driver,
5574 &driver_attr_support_device_change); 5441 &driver_attr_support_device_change);
5575 if (rval) 5442 if (rval)
5576 goto err_dcf_support_device_change; 5443 goto err_dcf_support_device_change;
@@ -5579,10 +5446,6 @@ static int __init megasas_init(void)
5579 5446
5580err_dcf_support_device_change: 5447err_dcf_support_device_change:
5581 driver_remove_file(&megasas_pci_driver.driver, 5448 driver_remove_file(&megasas_pci_driver.driver,
5582 &driver_attr_poll_mode_io);
5583
5584err_dcf_poll_mode_io:
5585 driver_remove_file(&megasas_pci_driver.driver,
5586 &driver_attr_dbg_lvl); 5449 &driver_attr_dbg_lvl);
5587err_dcf_dbg_lvl: 5450err_dcf_dbg_lvl:
5588 driver_remove_file(&megasas_pci_driver.driver, 5451 driver_remove_file(&megasas_pci_driver.driver,
@@ -5607,8 +5470,6 @@ err_pcidrv:
5607static void __exit megasas_exit(void) 5470static void __exit megasas_exit(void)
5608{ 5471{
5609 driver_remove_file(&megasas_pci_driver.driver, 5472 driver_remove_file(&megasas_pci_driver.driver,
5610 &driver_attr_poll_mode_io);
5611 driver_remove_file(&megasas_pci_driver.driver,
5612 &driver_attr_dbg_lvl); 5473 &driver_attr_dbg_lvl);
5613 driver_remove_file(&megasas_pci_driver.driver, 5474 driver_remove_file(&megasas_pci_driver.driver,
5614 &driver_attr_support_poll_for_event); 5475 &driver_attr_support_poll_for_event);
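
With poll_mode_io gone, megasas_init() keeps the usual create-then-unwind idiom for driver sysfs attributes: each driver_create_file() that fails must undo, in reverse order, only the attributes created before it. A minimal sketch of the idiom, assuming just the version and dbg_lvl attributes (the driver's real list is longer):

/* Sketch only: not the driver's full attribute set. */
static int example_create_attrs(struct device_driver *drv)
{
	int rval;

	rval = driver_create_file(drv, &driver_attr_version);
	if (rval)
		return rval;

	rval = driver_create_file(drv, &driver_attr_dbg_lvl);
	if (rval)
		goto err_version;

	return 0;

err_version:
	driver_remove_file(drv, &driver_attr_version);
	return rval;
}
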
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5255dd688ac..294abb0defa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
282 else { 282 else {
283 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ 283 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
284 if ((raid->level >= 5) && 284 if ((raid->level >= 5) &&
285 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER)) 285 ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
286 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
287 raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
286 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 288 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
287 else if (raid->level == 1) { 289 else if (raid->level == 1) {
288 /* Get alternate Pd. */ 290 /* Get alternate Pd. */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 22a3ff02e48..bfe68545203 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,6 +150,8 @@
150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */ 150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
151 /* recovery timeout */ 151 /* recovery timeout */
152 152
153#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
154#define LSW(x) ((uint16_t)(x))
153#define LSDW(x) ((u32)((u64)(x))) 155#define LSDW(x) ((u32)((u64)(x)))
154#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) 156#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
155 157
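
A brief, hypothetical illustration of the two macros added above: LSW() keeps the low 16 bits of a value and MSB() the high byte of a 16-bit word. ql4_os.c later combines them to pick 0x67 out of a 0x00006700 firmware halt status:

uint32_t halt_status = 0x00006700;	/* made-up register value */

/* MSB(halt_status) == (uint8_t)((uint16_t)0x6700 >> 8) == 0x67 */
/* LSW(0x67) just widens it back to 0x0067, so the test holds   */
if (LSW(MSB(halt_status)) == 0x67)
	;	/* firmware aborted with error code 0x00006700 */
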
@@ -671,6 +673,7 @@ struct scsi_qla_host {
671 uint16_t pri_ddb_idx; 673 uint16_t pri_ddb_idx;
672 uint16_t sec_ddb_idx; 674 uint16_t sec_ddb_idx;
673 int is_reset; 675 int is_reset;
676 uint16_t temperature;
674}; 677};
675 678
676struct ql4_task_data { 679struct ql4_task_data {
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1bdfa8120ac..90614f38b55 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
697 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 697 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
698 &ha->reg->ctrl_status); 698 &ha->reg->ctrl_status);
699 readl(&ha->reg->ctrl_status); 699 readl(&ha->reg->ctrl_status);
700 writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
701 &ha->reg->ctrl_status);
702 readl(&ha->reg->ctrl_status);
700 spin_unlock_irqrestore(&ha->hardware_lock, flags); 703 spin_unlock_irqrestore(&ha->hardware_lock, flags);
701 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) { 704 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
702 DEBUG2(printk("scsi%ld: %s: Get firmware " 705 DEBUG2(printk("scsi%ld: %s: Get firmware "
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c2593782fbb..e1e66a45e4d 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
219 ha->mailbox_timeout_count++; 219 ha->mailbox_timeout_count++;
220 mbx_sts[0] = (-1); 220 mbx_sts[0] = (-1);
221 set_bit(DPC_RESET_HA, &ha->dpc_flags); 221 set_bit(DPC_RESET_HA, &ha->dpc_flags);
222 if (is_qla8022(ha)) {
223 ql4_printk(KERN_INFO, ha,
224 "disabling pause transmit on port 0 & 1.\n");
225 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
226 CRB_NIU_XG_PAUSE_CTL_P0 |
227 CRB_NIU_XG_PAUSE_CTL_P1);
228 }
222 goto mbox_exit; 229 goto mbox_exit;
223 } 230 }
224 231
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 8d6bc1b2ff1..78f1111158d 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1875,6 +1875,11 @@ exit:
1875int qla4_8xxx_load_risc(struct scsi_qla_host *ha) 1875int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
1876{ 1876{
1877 int retval; 1877 int retval;
1878
1879 /* clear the interrupt */
1880 writel(0, &ha->qla4_8xxx_reg->host_int);
1881 readl(&ha->qla4_8xxx_reg->host_int);
1882
1878 retval = qla4_8xxx_device_state_handler(ha); 1883 retval = qla4_8xxx_device_state_handler(ha);
1879 1884
1880 if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags)) 1885 if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 35376a1c3f1..dc45ac92369 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -19,12 +19,28 @@
19#define PHAN_PEG_RCV_INITIALIZED 0xff01 19#define PHAN_PEG_RCV_INITIALIZED 0xff01
20 20
21/*CRB_RELATED*/ 21/*CRB_RELATED*/
22#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200) 22#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200))
23#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X)) 23#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
24
25#define CRB_CMDPEG_STATE QLA82XX_REG(0x50) 24#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 25#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 26#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
27#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
28
29#define qla82xx_get_temp_val(x) ((x) >> 16)
30#define qla82xx_get_temp_state(x) ((x) & 0xffff)
31#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
32
33/*
34 * Temperature control.
35 */
36enum {
37 QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
38 QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
39 QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
40};
41
42#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
43#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
28 44
29#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 45#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
30#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E 46#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
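
For reference, a sketch (with a made-up reading) of how the CRB_TEMP_STATE word decodes under the macros above: the temperature value occupies the upper 16 bits and the state the lower 16 bits:

uint32_t temp = qla82xx_encode_temp(45, QLA82XX_TEMP_NORMAL);	/* 0x002d0001 */
uint32_t temp_val = qla82xx_get_temp_val(temp);		/* 45 degrees C */
uint32_t temp_state = qla82xx_get_temp_state(temp);	/* QLA82XX_TEMP_NORMAL */
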
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ec393a00c03..ce6d3b7f0c6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep;
35int ql4xdisablesysfsboot = 1; 35int ql4xdisablesysfsboot = 1;
36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); 36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(ql4xdisablesysfsboot, 37MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 "Set to disable exporting boot targets to sysfs\n" 38 " Set to disable exporting boot targets to sysfs.\n"
39 " 0 - Export boot targets\n" 39 "\t\t 0 - Export boot targets\n"
40 " 1 - Do not export boot targets (Default)"); 40 "\t\t 1 - Do not export boot targets (Default)");
41 41
42int ql4xdontresethba = 0; 42int ql4xdontresethba = 0;
43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
44MODULE_PARM_DESC(ql4xdontresethba, 44MODULE_PARM_DESC(ql4xdontresethba,
45 "Don't reset the HBA for driver recovery \n" 45 " Don't reset the HBA for driver recovery.\n"
46 " 0 - It will reset HBA (Default)\n" 46 "\t\t 0 - It will reset HBA (Default)\n"
47 " 1 - It will NOT reset HBA"); 47 "\t\t 1 - It will NOT reset HBA");
48 48
49int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */ 49int ql4xextended_error_logging;
50module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); 50module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
51MODULE_PARM_DESC(ql4xextended_error_logging, 51MODULE_PARM_DESC(ql4xextended_error_logging,
52 "Option to enable extended error logging, " 52 " Option to enable extended error logging.\n"
53 "Default is 0 - no logging, 1 - debug logging"); 53 "\t\t 0 - no logging (Default)\n"
54 "\t\t 2 - debug logging");
54 55
55int ql4xenablemsix = 1; 56int ql4xenablemsix = 1;
56module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); 57module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
57MODULE_PARM_DESC(ql4xenablemsix, 58MODULE_PARM_DESC(ql4xenablemsix,
58 "Set to enable MSI or MSI-X interrupt mechanism.\n" 59 " Set to enable MSI or MSI-X interrupt mechanism.\n"
59 " 0 = enable INTx interrupt mechanism.\n" 60 "\t\t 0 = enable INTx interrupt mechanism.\n"
60 " 1 = enable MSI-X interrupt mechanism (Default).\n" 61 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
61 " 2 = enable MSI interrupt mechanism."); 62 "\t\t 2 = enable MSI interrupt mechanism.");
62 63
63#define QL4_DEF_QDEPTH 32 64#define QL4_DEF_QDEPTH 32
64static int ql4xmaxqdepth = QL4_DEF_QDEPTH; 65static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
65module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); 66module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
66MODULE_PARM_DESC(ql4xmaxqdepth, 67MODULE_PARM_DESC(ql4xmaxqdepth,
67 "Maximum queue depth to report for target devices.\n" 68 " Maximum queue depth to report for target devices.\n"
68 " Default: 32."); 69 "\t\t Default: 32.");
69 70
70static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
71module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 72module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
72MODULE_PARM_DESC(ql4xsess_recovery_tmo, 73MODULE_PARM_DESC(ql4xsess_recovery_tmo,
73 "Target Session Recovery Timeout.\n" 74 "Target Session Recovery Timeout.\n"
74 " Default: 120 sec."); 75 "\t\t Default: 120 sec.");
75 76
76static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
77/* 78/*
@@ -1630,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1630 1631
1631 /* Update timers after login */ 1632 /* Update timers after login */
1632 ddb_entry->default_relogin_timeout = 1633 ddb_entry->default_relogin_timeout =
1633 le16_to_cpu(fw_ddb_entry->def_timeout); 1634 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
1635 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
1636 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
1634 ddb_entry->default_time2wait = 1637 ddb_entry->default_time2wait =
1635 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 1638 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1636 1639
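
The same range check recurs twice more below, in qla4xxx_setup_flash_ddb_entry() and for the sendtarget wait in qla4xxx_build_ddb_list(). A hypothetical helper (the patch open-codes the ternary at each site) makes the intent explicit: trust the firmware default timeout only when it lies strictly between LOGIN_TOV and ten times LOGIN_TOV, otherwise fall back to LOGIN_TOV:

static uint16_t sanitize_def_timeout(uint16_t def_timeout)
{
	if (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10)
		return def_timeout;
	return LOGIN_TOV;
}
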
@@ -1970,6 +1973,42 @@ mem_alloc_error_exit:
1970} 1973}
1971 1974
1972/** 1975/**
1976 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
1977 * @ha: adapter block pointer.
1978 *
1979 * Note: The caller should not hold the idc lock.
1980 **/
1981static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
1982{
1983 uint32_t temp, temp_state, temp_val;
1984 int status = QLA_SUCCESS;
1985
1986 temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
1987
1988 temp_state = qla82xx_get_temp_state(temp);
1989 temp_val = qla82xx_get_temp_val(temp);
1990
1991 if (temp_state == QLA82XX_TEMP_PANIC) {
1992 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
1993 " exceeds maximum allowed. Hardware has been shut"
1994 " down.\n", temp_val);
1995 status = QLA_ERROR;
1996 } else if (temp_state == QLA82XX_TEMP_WARN) {
1997 if (ha->temperature == QLA82XX_TEMP_NORMAL)
1998 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
1999 " degrees C exceeds operating range."
2000 " Immediate action needed.\n", temp_val);
2001 } else {
2002 if (ha->temperature == QLA82XX_TEMP_WARN)
2003 ql4_printk(KERN_INFO, ha, "Device temperature is"
2004 " now %d degrees C in normal range.\n",
2005 temp_val);
2006 }
2007 ha->temperature = temp_state;
2008 return status;
2009}
2010
2011/**
1973 * qla4_8xxx_check_fw_alive - Check firmware health 2012 * qla4_8xxx_check_fw_alive - Check firmware health
1974 * @ha: Pointer to host adapter structure. 2013 * @ha: Pointer to host adapter structure.
1975 * 2014 *
@@ -2040,7 +2079,16 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2040 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 2079 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2041 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 2080 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2042 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2081 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2043 if (dev_state == QLA82XX_DEV_NEED_RESET && 2082
2083 if (qla4_8xxx_check_temp(ha)) {
2084 ql4_printk(KERN_INFO, ha, "disabling pause"
2085 " transmit on port 0 & 1.\n");
2086 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2087 CRB_NIU_XG_PAUSE_CTL_P0 |
2088 CRB_NIU_XG_PAUSE_CTL_P1);
2089 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2090 qla4xxx_wake_dpc(ha);
2091 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
2044 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 2092 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2045 if (!ql4xdontresethba) { 2093 if (!ql4xdontresethba) {
2046 ql4_printk(KERN_INFO, ha, "%s: HW State: " 2094 ql4_printk(KERN_INFO, ha, "%s: HW State: "
@@ -2057,9 +2105,21 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2057 } else { 2105 } else {
2058 /* Check firmware health */ 2106 /* Check firmware health */
2059 if (qla4_8xxx_check_fw_alive(ha)) { 2107 if (qla4_8xxx_check_fw_alive(ha)) {
2108 ql4_printk(KERN_INFO, ha, "disabling pause"
2109 " transmit on port 0 & 1.\n");
2110 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2111 CRB_NIU_XG_PAUSE_CTL_P0 |
2112 CRB_NIU_XG_PAUSE_CTL_P1);
2060 halt_status = qla4_8xxx_rd_32(ha, 2113 halt_status = qla4_8xxx_rd_32(ha,
2061 QLA82XX_PEG_HALT_STATUS1); 2114 QLA82XX_PEG_HALT_STATUS1);
2062 2115
2116 if (LSW(MSB(halt_status)) == 0x67)
2117 ql4_printk(KERN_ERR, ha, "%s:"
2118 " Firmware aborted with"
2119 " error code 0x00006700."
2120 " Device is being reset\n",
2121 __func__);
2122
2063 /* Since we cannot change dev_state in interrupt 2123 /* Since we cannot change dev_state in interrupt
2064 * context, set appropriate DPC flag then wakeup 2124 * context, set appropriate DPC flag then wakeup
2065 * DPC */ 2125 * DPC */
@@ -2078,7 +2138,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2078 } 2138 }
2079} 2139}
2080 2140
2081void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 2141static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2082{ 2142{
2083 struct iscsi_session *sess; 2143 struct iscsi_session *sess;
2084 struct ddb_entry *ddb_entry; 2144 struct ddb_entry *ddb_entry;
@@ -3826,16 +3886,14 @@ exit_check:
3826 return ret; 3886 return ret;
3827} 3887}
3828 3888
3829static void qla4xxx_free_nt_list(struct list_head *list_nt) 3889static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
3830{ 3890{
3831 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 3891 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
3832 3892
3833 /* Free up the normaltargets list */ 3893 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
3834 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 3894 list_del_init(&ddb_idx->list);
3835 list_del_init(&nt_ddb_idx->list); 3895 vfree(ddb_idx);
3836 vfree(nt_ddb_idx);
3837 } 3896 }
3838
3839} 3897}
3840 3898
3841static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 3899static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
@@ -3884,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
3884static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 3942static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
3885 struct ddb_entry *ddb_entry) 3943 struct ddb_entry *ddb_entry)
3886{ 3944{
3945 uint16_t def_timeout;
3946
3887 ddb_entry->ddb_type = FLASH_DDB; 3947 ddb_entry->ddb_type = FLASH_DDB;
3888 ddb_entry->fw_ddb_index = INVALID_ENTRY; 3948 ddb_entry->fw_ddb_index = INVALID_ENTRY;
3889 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3949 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3894,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
3894 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 3954 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
3895 atomic_set(&ddb_entry->relogin_timer, 0); 3955 atomic_set(&ddb_entry->relogin_timer, 0);
3896 atomic_set(&ddb_entry->relogin_retry_count, 0); 3956 atomic_set(&ddb_entry->relogin_retry_count, 0);
3897 3957 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
3898 ddb_entry->default_relogin_timeout = 3958 ddb_entry->default_relogin_timeout =
3899 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 3959 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
3960 def_timeout : LOGIN_TOV;
3900 ddb_entry->default_time2wait = 3961 ddb_entry->default_time2wait =
3901 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 3962 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
3902} 3963}
@@ -3934,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
3934 ip_state == IP_ADDRSTATE_DEPRICATED || 3995 ip_state == IP_ADDRSTATE_DEPRICATED ||
3935 ip_state == IP_ADDRSTATE_DISABLING) 3996 ip_state == IP_ADDRSTATE_DISABLING)
3936 ip_idx[idx] = -1; 3997 ip_idx[idx] = -1;
3937
3938 } 3998 }
3939 3999
3940 /* Break if all IP states checked */ 4000 /* Break if all IP states checked */
@@ -3947,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
3947 } while (time_after(wtime, jiffies)); 4007 } while (time_after(wtime, jiffies));
3948} 4008}
3949 4009
3950void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 4010static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4011 struct list_head *list_st)
3951{ 4012{
4013 struct qla_ddb_index *st_ddb_idx;
3952 int max_ddbs; 4014 int max_ddbs;
4015 int fw_idx_size;
4016 struct dev_db_entry *fw_ddb_entry;
4017 dma_addr_t fw_ddb_dma;
3953 int ret; 4018 int ret;
3954 uint32_t idx = 0, next_idx = 0; 4019 uint32_t idx = 0, next_idx = 0;
3955 uint32_t state = 0, conn_err = 0; 4020 uint32_t state = 0, conn_err = 0;
3956 uint16_t conn_id; 4021 uint16_t conn_id = 0;
3957 struct dev_db_entry *fw_ddb_entry;
3958 struct ddb_entry *ddb_entry = NULL;
3959 dma_addr_t fw_ddb_dma;
3960 struct iscsi_cls_session *cls_sess;
3961 struct iscsi_session *sess;
3962 struct iscsi_cls_conn *cls_conn;
3963 struct iscsi_endpoint *ep;
3964 uint16_t cmds_max = 32, tmo = 0;
3965 uint32_t initial_cmdsn = 0;
3966 struct list_head list_st, list_nt; /* List of sendtargets */
3967 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
3968 int fw_idx_size;
3969 unsigned long wtime;
3970 struct qla_ddb_index *nt_ddb_idx;
3971
3972 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3973 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
3974 ha->is_reset = is_reset;
3975 return;
3976 }
3977 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
3978 MAX_DEV_DB_ENTRIES;
3979 4022
3980 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 4023 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
3981 &fw_ddb_dma); 4024 &fw_ddb_dma);
3982 if (fw_ddb_entry == NULL) { 4025 if (fw_ddb_entry == NULL) {
3983 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 4026 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
3984 goto exit_ddb_list; 4027 goto exit_st_list;
3985 } 4028 }
3986 4029
3987 INIT_LIST_HEAD(&list_st); 4030 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
3988 INIT_LIST_HEAD(&list_nt); 4031 MAX_DEV_DB_ENTRIES;
3989 fw_idx_size = sizeof(struct qla_ddb_index); 4032 fw_idx_size = sizeof(struct qla_ddb_index);
3990 4033
3991 for (idx = 0; idx < max_ddbs; idx = next_idx) { 4034 for (idx = 0; idx < max_ddbs; idx = next_idx) {
3992 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, 4035 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
3993 fw_ddb_dma, NULL, 4036 NULL, &next_idx, &state,
3994 &next_idx, &state, &conn_err, 4037 &conn_err, NULL, &conn_id);
3995 NULL, &conn_id);
3996 if (ret == QLA_ERROR) 4038 if (ret == QLA_ERROR)
3997 break; 4039 break;
3998 4040
3999 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4000 goto continue_next_st;
4001
4002 /* Check if ST, add to the list_st */ 4041 /* Check if ST, add to the list_st */
4003 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 4042 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4004 goto continue_next_st; 4043 goto continue_next_st;
@@ -4009,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4009 4048
4010 st_ddb_idx->fw_ddb_idx = idx; 4049 st_ddb_idx->fw_ddb_idx = idx;
4011 4050
4012 list_add_tail(&st_ddb_idx->list, &list_st); 4051 list_add_tail(&st_ddb_idx->list, list_st);
4013continue_next_st: 4052continue_next_st:
4014 if (next_idx == 0) 4053 if (next_idx == 0)
4015 break; 4054 break;
4016 } 4055 }
4017 4056
4018 /* Before issuing conn open mbox, ensure all IPs states are configured 4057exit_st_list:
4019 * Note, conn open fails if IPs are not configured 4058 if (fw_ddb_entry)
4059 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4060}
4061
4062/**
4063 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4064 * @ha: pointer to adapter structure
4065 * @list_ddb: List from which failed ddbs are to be removed
4066 *
4067 * Iterate over the list of DDBs and remove those that are either in the
4068 * no-connection-active state or the session-failed state
4069 **/
4070static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4071 struct list_head *list_ddb)
4072{
4073 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
4074 uint32_t next_idx = 0;
4075 uint32_t state = 0, conn_err = 0;
4076 int ret;
4077
4078 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4079 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4080 NULL, 0, NULL, &next_idx, &state,
4081 &conn_err, NULL, NULL);
4082 if (ret == QLA_ERROR)
4083 continue;
4084
4085 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4086 state == DDB_DS_SESSION_FAILED) {
4087 list_del_init(&ddb_idx->list);
4088 vfree(ddb_idx);
4089 }
4090 }
4091}
4092
4093static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4094 struct dev_db_entry *fw_ddb_entry,
4095 int is_reset)
4096{
4097 struct iscsi_cls_session *cls_sess;
4098 struct iscsi_session *sess;
4099 struct iscsi_cls_conn *cls_conn;
4100 struct iscsi_endpoint *ep;
4101 uint16_t cmds_max = 32;
4102 uint16_t conn_id = 0;
4103 uint32_t initial_cmdsn = 0;
4104 int ret = QLA_SUCCESS;
4105
4106 struct ddb_entry *ddb_entry = NULL;
4107
4108 /* Create session object, with INVALID_ENTRY,
4109 * the target_id would get set when we issue the login
4020 */ 4110 */
4021 qla4xxx_wait_for_ip_configuration(ha); 4111 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4112 cmds_max, sizeof(struct ddb_entry),
4113 sizeof(struct ql4_task_data),
4114 initial_cmdsn, INVALID_ENTRY);
4115 if (!cls_sess) {
4116 ret = QLA_ERROR;
4117 goto exit_setup;
4118 }
4022 4119
4023 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 4120 /*
4024 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 4121 * so calling module_put function to decrement the
4025 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 4122 * reference count.
4123 **/
4124 module_put(qla4xxx_iscsi_transport.owner);
4125 sess = cls_sess->dd_data;
4126 ddb_entry = sess->dd_data;
4127 ddb_entry->sess = cls_sess;
4128
4129 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4130 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4131 sizeof(struct dev_db_entry));
4132
4133 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4134
4135 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4136
4137 if (!cls_conn) {
4138 ret = QLA_ERROR;
4139 goto exit_setup;
4026 } 4140 }
4027 4141
4028 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 4142 ddb_entry->conn = cls_conn;
4029 tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
4030 DEBUG2(ql4_printk(KERN_INFO, ha,
4031 "Default time to wait for build ddb %d\n", tmo));
4032 4143
4033 wtime = jiffies + (HZ * tmo); 4144 /* Setup ep, for displaying attributes in sysfs */
4034 do { 4145 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4035 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, 4146 if (ep) {
4036 list) { 4147 ep->conn = cls_conn;
4037 ret = qla4xxx_get_fwddb_entry(ha, 4148 cls_conn->ep = ep;
4038 st_ddb_idx->fw_ddb_idx, 4149 } else {
4039 NULL, 0, NULL, &next_idx, 4150 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4040 &state, &conn_err, NULL, 4151 ret = QLA_ERROR;
4041 NULL); 4152 goto exit_setup;
4042 if (ret == QLA_ERROR) 4153 }
4043 continue;
4044 4154
4045 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 4155 /* Update sess/conn params */
4046 state == DDB_DS_SESSION_FAILED) { 4156 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
4047 list_del_init(&st_ddb_idx->list);
4048 vfree(st_ddb_idx);
4049 }
4050 }
4051 schedule_timeout_uninterruptible(HZ / 10);
4052 } while (time_after(wtime, jiffies));
4053 4157
4054 /* Free up the sendtargets list */ 4158 if (is_reset == RESET_ADAPTER) {
4055 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 4159 iscsi_block_session(cls_sess);
4056 list_del_init(&st_ddb_idx->list); 4160 /* Use the relogin path to discover new devices
4057 vfree(st_ddb_idx); 4161 * by short-circuiting the logic of setting
4162 * timer to relogin - instead set the flags
4163 * to initiate login right away.
4164 */
4165 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4166 set_bit(DF_RELOGIN, &ddb_entry->flags);
4058 } 4167 }
4059 4168
4169exit_setup:
4170 return ret;
4171}
4172
4173static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4174 struct list_head *list_nt, int is_reset)
4175{
4176 struct dev_db_entry *fw_ddb_entry;
4177 dma_addr_t fw_ddb_dma;
4178 int max_ddbs;
4179 int fw_idx_size;
4180 int ret;
4181 uint32_t idx = 0, next_idx = 0;
4182 uint32_t state = 0, conn_err = 0;
4183 uint16_t conn_id = 0;
4184 struct qla_ddb_index *nt_ddb_idx;
4185
4186 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4187 &fw_ddb_dma);
4188 if (fw_ddb_entry == NULL) {
4189 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4190 goto exit_nt_list;
4191 }
4192 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4193 MAX_DEV_DB_ENTRIES;
4194 fw_idx_size = sizeof(struct qla_ddb_index);
4195
4060 for (idx = 0; idx < max_ddbs; idx = next_idx) { 4196 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4061 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, 4197 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4062 fw_ddb_dma, NULL, 4198 NULL, &next_idx, &state,
4063 &next_idx, &state, &conn_err, 4199 &conn_err, NULL, &conn_id);
4064 NULL, &conn_id);
4065 if (ret == QLA_ERROR) 4200 if (ret == QLA_ERROR)
4066 break; 4201 break;
4067 4202
@@ -4072,107 +4207,113 @@ continue_next_st:
4072 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 4207 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4073 goto continue_next_nt; 4208 goto continue_next_nt;
4074 4209
4075 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 4210 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4076 state == DDB_DS_SESSION_FAILED) { 4211 state == DDB_DS_SESSION_FAILED))
4077 DEBUG2(ql4_printk(KERN_INFO, ha, 4212 goto continue_next_nt;
4078 "Adding DDB to session = 0x%x\n",
4079 idx));
4080 if (is_reset == INIT_ADAPTER) {
4081 nt_ddb_idx = vmalloc(fw_idx_size);
4082 if (!nt_ddb_idx)
4083 break;
4084
4085 nt_ddb_idx->fw_ddb_idx = idx;
4086
4087 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4088 sizeof(struct dev_db_entry));
4089
4090 if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
4091 fw_ddb_entry) == QLA_SUCCESS) {
4092 vfree(nt_ddb_idx);
4093 goto continue_next_nt;
4094 }
4095 list_add_tail(&nt_ddb_idx->list, &list_nt);
4096 } else if (is_reset == RESET_ADAPTER) {
4097 if (qla4xxx_is_session_exists(ha,
4098 fw_ddb_entry) == QLA_SUCCESS)
4099 goto continue_next_nt;
4100 }
4101 4213
4102 /* Create session object, with INVALID_ENTRY, 4214 DEBUG2(ql4_printk(KERN_INFO, ha,
4103 * the target_id would get set when we issue the login 4215 "Adding DDB to session = 0x%x\n", idx));
4104 */ 4216 if (is_reset == INIT_ADAPTER) {
4105 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, 4217 nt_ddb_idx = vmalloc(fw_idx_size);
4106 ha->host, cmds_max, 4218 if (!nt_ddb_idx)
4107 sizeof(struct ddb_entry), 4219 break;
4108 sizeof(struct ql4_task_data),
4109 initial_cmdsn, INVALID_ENTRY);
4110 if (!cls_sess)
4111 goto exit_ddb_list;
4112 4220
4113 /* 4221 nt_ddb_idx->fw_ddb_idx = idx;
4114 * iscsi_session_setup increments the driver reference
4115 * count which wouldn't let the driver to be unloaded.
4116 * so calling module_put function to decrement the
4117 * reference count.
4118 **/
4119 module_put(qla4xxx_iscsi_transport.owner);
4120 sess = cls_sess->dd_data;
4121 ddb_entry = sess->dd_data;
4122 ddb_entry->sess = cls_sess;
4123 4222
4124 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 4223 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4125 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4126 sizeof(struct dev_db_entry)); 4224 sizeof(struct dev_db_entry));
4127 4225
4128 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry); 4226 if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
4129 4227 fw_ddb_entry) == QLA_SUCCESS) {
4130 cls_conn = iscsi_conn_setup(cls_sess, 4228 vfree(nt_ddb_idx);
4131 sizeof(struct qla_conn), 4229 goto continue_next_nt;
4132 conn_id);
4133 if (!cls_conn)
4134 goto exit_ddb_list;
4135
4136 ddb_entry->conn = cls_conn;
4137
4138 /* Setup ep, for displaying attributes in sysfs */
4139 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4140 if (ep) {
4141 ep->conn = cls_conn;
4142 cls_conn->ep = ep;
4143 } else {
4144 DEBUG2(ql4_printk(KERN_ERR, ha,
4145 "Unable to get ep\n"));
4146 }
4147
4148 /* Update sess/conn params */
4149 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
4150 cls_conn);
4151
4152 if (is_reset == RESET_ADAPTER) {
4153 iscsi_block_session(cls_sess);
4154 /* Use the relogin path to discover new devices
4155 * by short-circuiting the logic of setting
4156 * timer to relogin - instead set the flags
4157 * to initiate login right away.
4158 */
4159 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4160 set_bit(DF_RELOGIN, &ddb_entry->flags);
4161 } 4230 }
4231 list_add_tail(&nt_ddb_idx->list, list_nt);
4232 } else if (is_reset == RESET_ADAPTER) {
4233 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4234 QLA_SUCCESS)
4235 goto continue_next_nt;
4162 } 4236 }
4237
4238 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4239 if (ret == QLA_ERROR)
4240 goto exit_nt_list;
4241
4163continue_next_nt: 4242continue_next_nt:
4164 if (next_idx == 0) 4243 if (next_idx == 0)
4165 break; 4244 break;
4166 } 4245 }
4167exit_ddb_list: 4246
4168 qla4xxx_free_nt_list(&list_nt); 4247exit_nt_list:
4169 if (fw_ddb_entry) 4248 if (fw_ddb_entry)
4170 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 4249 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4250}
4251
4252/**
4253 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4254 * @ha: pointer to adapter structure
4255 * @is_reset: Is this init path or reset path
4256 *
4257 * Create a list of sendtargets (st) from firmware DDBs, issue send targets
4258 * using connection open, then create the list of normal targets (nt)
4259 * from firmware DDBs. Based on the nt list, set up session and connection
4260 * objects.
4261 **/
4262void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4263{
4264 uint16_t tmo = 0;
4265 struct list_head list_st, list_nt;
4266 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
4267 unsigned long wtime;
4268
4269 if (!test_bit(AF_LINK_UP, &ha->flags)) {
4270 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
4271 ha->is_reset = is_reset;
4272 return;
4273 }
4274
4275 INIT_LIST_HEAD(&list_st);
4276 INIT_LIST_HEAD(&list_nt);
4277
4278 qla4xxx_build_st_list(ha, &list_st);
4279
4280 /* Before issuing conn open mbox, ensure all IPs states are configured
4281 * Note, conn open fails if IPs are not configured
4282 */
4283 qla4xxx_wait_for_ip_configuration(ha);
4284
4285 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
4286 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
4287 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
4288 }
4289
4290 /* Wait for all sendtargets to complete; minimum 12 sec wait */
4291 tmo = ((ha->def_timeout > LOGIN_TOV) &&
4292 (ha->def_timeout < LOGIN_TOV * 10) ?
4293 ha->def_timeout : LOGIN_TOV);
4294
4295 DEBUG2(ql4_printk(KERN_INFO, ha,
4296 "Default time to wait for build ddb %d\n", tmo));
4297
4298 wtime = jiffies + (HZ * tmo);
4299 do {
4300 if (list_empty(&list_st))
4301 break;
4302
4303 qla4xxx_remove_failed_ddb(ha, &list_st);
4304 schedule_timeout_uninterruptible(HZ / 10);
4305 } while (time_after(wtime, jiffies));
4306
4307 /* Free up the sendtargets list */
4308 qla4xxx_free_ddb_list(&list_st);
4309
4310 qla4xxx_build_nt_list(ha, &list_nt, is_reset);
4311
4312 qla4xxx_free_ddb_list(&list_nt);
4171 4313
4172 qla4xxx_free_ddb_index(ha); 4314 qla4xxx_free_ddb_index(ha);
4173} 4315}
4174 4316
4175
4176/** 4317/**
4177 * qla4xxx_probe_adapter - callback function to probe HBA 4318 * qla4xxx_probe_adapter - callback function to probe HBA
4178 * @pdev: pointer to pci_dev structure 4319 * @pdev: pointer to pci_dev structure
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 26a3fa34a33..133989b3a9f 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k10" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f85cfa6c47b..b2c95dbe9d6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1316 } 1316 }
1317 1317
1318 if (scsi_target_is_busy(starget)) { 1318 if (scsi_target_is_busy(starget)) {
1319 if (list_empty(&sdev->starved_entry)) 1319 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1320 list_add_tail(&sdev->starved_entry,
1321 &shost->starved_list);
1322 return 0; 1320 return 0;
1323 } 1321 }
1324 1322
1325 /* We're OK to process the command, so we can't be starved */
1326 if (!list_empty(&sdev->starved_entry))
1327 list_del_init(&sdev->starved_entry);
1328 return 1; 1323 return 1;
1329} 1324}
1330 1325
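
The simplification above works because list_move_tail() is list_del() plus list_add_tail(), and a starved_entry that is off the list sits in the self-pointing state left by INIT_LIST_HEAD()/list_del_init(), for which the unlink is harmless. The three branches therefore collapse to one unconditional call, sketched here:

if (scsi_target_is_busy(starget)) {
	/* unlink (a no-op for a self-pointing node) and queue at the tail */
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	return 0;
}
return 1;
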
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1b214910b71..f59d4a05ecd 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
3048 3048
3049 spin_lock_irqsave(shost->host_lock, flags); 3049 spin_lock_irqsave(shost->host_lock, flags);
3050 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | 3050 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3051 FC_RPORT_DEVLOSS_PENDING); 3051 FC_RPORT_DEVLOSS_PENDING |
3052 FC_RPORT_DEVLOSS_CALLBK_DONE);
3052 spin_unlock_irqrestore(shost->host_lock, flags); 3053 spin_unlock_irqrestore(shost->host_lock, flags);
3053 3054
3054 /* ensure any stgt delete functions are done */ 3055 /* ensure any stgt delete functions are done */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 02d99982a74..eacd46bb36b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2368,16 +2368,15 @@ static ssize_t
2368sg_proc_write_adio(struct file *filp, const char __user *buffer, 2368sg_proc_write_adio(struct file *filp, const char __user *buffer,
2369 size_t count, loff_t *off) 2369 size_t count, loff_t *off)
2370{ 2370{
2371 int num; 2371 int err;
2372 char buff[11]; 2372 unsigned long num;
2373 2373
2374 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2374 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2375 return -EACCES; 2375 return -EACCES;
2376 num = (count < 10) ? count : 10; 2376 err = kstrtoul_from_user(buffer, count, 0, &num);
2377 if (copy_from_user(buff, buffer, num)) 2377 if (err)
2378 return -EFAULT; 2378 return err;
2379 buff[num] = '\0'; 2379 sg_allow_dio = num ? 1 : 0;
2380 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2381 return count; 2380 return count;
2382} 2381}
2383 2382
@@ -2390,17 +2389,15 @@ static ssize_t
2390sg_proc_write_dressz(struct file *filp, const char __user *buffer, 2389sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2391 size_t count, loff_t *off) 2390 size_t count, loff_t *off)
2392{ 2391{
2393 int num; 2392 int err;
2394 unsigned long k = ULONG_MAX; 2393 unsigned long k = ULONG_MAX;
2395 char buff[11];
2396 2394
2397 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2395 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2398 return -EACCES; 2396 return -EACCES;
2399 num = (count < 10) ? count : 10; 2397
2400 if (copy_from_user(buff, buffer, num)) 2398 err = kstrtoul_from_user(buffer, count, 0, &k);
2401 return -EFAULT; 2399 if (err)
2402 buff[num] = '\0'; 2400 return err;
2403 k = simple_strtoul(buff, NULL, 10);
2404 if (k <= 1048576) { /* limit "big buff" to 1 MB */ 2401 if (k <= 1048576) { /* limit "big buff" to 1 MB */
2405 sg_big_buff = k; 2402 sg_big_buff = k;
2406 return count; 2403 return count;
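
Both /proc writers above now share one parse idiom: kstrtoul_from_user() copies at most count bytes from user space and parses them as an unsigned long in the given base (0 selects auto-detection), returning 0 or a negative errno. That replaces the fixed 10-byte buffer, copy_from_user() and simple_strtoul(). The pattern, as a sketch:

unsigned long val;
int err;

err = kstrtoul_from_user(buffer, count, 0, &val);
if (err)
	return err;	/* typically -EINVAL or -ERANGE */
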
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index b4543f575f4..36d1ed7817e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
839 struct sym_lcb *lp = sym_lp(tp, sdev->lun); 839 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
840 unsigned long flags; 840 unsigned long flags;
841 841
842 /* if slave_alloc returned before allocating a sym_lcb, return */
843 if (!lp)
844 return;
845
842 spin_lock_irqsave(np->s.host->host_lock, flags); 846 spin_lock_irqsave(np->s.host->host_lock, flags);
843 847
844 if (lp->busy_itlq || lp->busy_itl) { 848 if (lp->busy_itlq || lp->busy_itl) {
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e743a45ee92..8418eb03665 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
131 rxchan = dws->rxchan; 131 rxchan = dws->rxchan;
132 132
133 /* 2. Prepare the TX dma transfer */ 133 /* 2. Prepare the TX dma transfer */
134 txconf.direction = DMA_TO_DEVICE; 134 txconf.direction = DMA_MEM_TO_DEV;
135 txconf.dst_addr = dws->dma_addr; 135 txconf.dst_addr = dws->dma_addr;
136 txconf.dst_maxburst = LNW_DMA_MSIZE_16; 136 txconf.dst_maxburst = LNW_DMA_MSIZE_16;
137 txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 137 txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
147 txdesc = txchan->device->device_prep_slave_sg(txchan, 147 txdesc = txchan->device->device_prep_slave_sg(txchan,
148 &dws->tx_sgl, 148 &dws->tx_sgl,
149 1, 149 1,
150 DMA_TO_DEVICE, 150 DMA_MEM_TO_DEV,
151 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); 151 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
152 txdesc->callback = dw_spi_dma_done; 152 txdesc->callback = dw_spi_dma_done;
153 txdesc->callback_param = dws; 153 txdesc->callback_param = dws;
154 154
155 /* 3. Prepare the RX dma transfer */ 155 /* 3. Prepare the RX dma transfer */
156 rxconf.direction = DMA_FROM_DEVICE; 156 rxconf.direction = DMA_DEV_TO_MEM;
157 rxconf.src_addr = dws->dma_addr; 157 rxconf.src_addr = dws->dma_addr;
158 rxconf.src_maxburst = LNW_DMA_MSIZE_16; 158 rxconf.src_maxburst = LNW_DMA_MSIZE_16;
159 rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 159 rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
169 rxdesc = rxchan->device->device_prep_slave_sg(rxchan, 169 rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
170 &dws->rx_sgl, 170 &dws->rx_sgl,
171 1, 171 1,
172 DMA_FROM_DEVICE, 172 DMA_DEV_TO_MEM,
173 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); 173 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
174 rxdesc->callback = dw_spi_dma_done; 174 rxdesc->callback = dw_spi_dma_done;
175 rxdesc->callback_param = dws; 175 rxdesc->callback_param = dws;
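
This and the remaining spi hunks make one substitution throughout: slave-transfer descriptors and dma_slave_config now take enum dma_transfer_direction, DMA_MEM_TO_DEV for memory-to-peripheral TX and DMA_DEV_TO_MEM for peripheral-to-memory RX, in place of the DMA-API mapping directions DMA_TO_DEVICE/DMA_FROM_DEVICE. A minimal sketch of a converted TX configuration, with a hypothetical FIFO address:

struct dma_slave_config txconf = {
	.direction	= DMA_MEM_TO_DEV,	/* was DMA_TO_DEVICE */
	.dst_addr	= fifo_phys_addr,	/* hypothetical peripheral FIFO */
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
};
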
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 0a282e5fcc9..d46e55c720b 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
551 struct dma_async_tx_descriptor *txd; 551 struct dma_async_tx_descriptor *txd;
552 enum dma_slave_buswidth buswidth; 552 enum dma_slave_buswidth buswidth;
553 struct dma_slave_config conf; 553 struct dma_slave_config conf;
554 enum dma_transfer_direction slave_dirn;
554 struct scatterlist *sg; 555 struct scatterlist *sg;
555 struct sg_table *sgt; 556 struct sg_table *sgt;
556 struct dma_chan *chan; 557 struct dma_chan *chan;
@@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
573 574
574 conf.src_addr = espi->sspdr_phys; 575 conf.src_addr = espi->sspdr_phys;
575 conf.src_addr_width = buswidth; 576 conf.src_addr_width = buswidth;
577 slave_dirn = DMA_DEV_TO_MEM;
576 } else { 578 } else {
577 chan = espi->dma_tx; 579 chan = espi->dma_tx;
578 buf = t->tx_buf; 580 buf = t->tx_buf;
@@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
580 582
581 conf.dst_addr = espi->sspdr_phys; 583 conf.dst_addr = espi->sspdr_phys;
582 conf.dst_addr_width = buswidth; 584 conf.dst_addr_width = buswidth;
585 slave_dirn = DMA_MEM_TO_DEV;
583 } 586 }
584 587
585 ret = dmaengine_slave_config(chan, &conf); 588 ret = dmaengine_slave_config(chan, &conf);
@@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
631 return ERR_PTR(-ENOMEM); 634 return ERR_PTR(-ENOMEM);
632 635
633 txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, 636 txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
634 dir, DMA_CTRL_ACK); 637 slave_dirn, DMA_CTRL_ACK);
635 if (!txd) { 638 if (!txd) {
636 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); 639 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
637 return ERR_PTR(-ENOMEM); 640 return ERR_PTR(-ENOMEM);
@@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
979 dma_cap_set(DMA_SLAVE, mask); 982 dma_cap_set(DMA_SLAVE, mask);
980 983
981 espi->dma_rx_data.port = EP93XX_DMA_SSP; 984 espi->dma_rx_data.port = EP93XX_DMA_SSP;
982 espi->dma_rx_data.direction = DMA_FROM_DEVICE; 985 espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
983 espi->dma_rx_data.name = "ep93xx-spi-rx"; 986 espi->dma_rx_data.name = "ep93xx-spi-rx";
984 987
985 espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, 988 espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
@@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
990 } 993 }
991 994
992 espi->dma_tx_data.port = EP93XX_DMA_SSP; 995 espi->dma_tx_data.port = EP93XX_DMA_SSP;
993 espi->dma_tx_data.direction = DMA_TO_DEVICE; 996 espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
994 espi->dma_tx_data.name = "ep93xx-spi-tx"; 997 espi->dma_tx_data.name = "ep93xx-spi-tx";
995 998
996 espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, 999 espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f1f5efbc340..2f9cb43a239 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -900,11 +900,11 @@ static int configure_dma(struct pl022 *pl022)
900{ 900{
901 struct dma_slave_config rx_conf = { 901 struct dma_slave_config rx_conf = {
902 .src_addr = SSP_DR(pl022->phybase), 902 .src_addr = SSP_DR(pl022->phybase),
903 .direction = DMA_FROM_DEVICE, 903 .direction = DMA_DEV_TO_MEM,
904 }; 904 };
905 struct dma_slave_config tx_conf = { 905 struct dma_slave_config tx_conf = {
906 .dst_addr = SSP_DR(pl022->phybase), 906 .dst_addr = SSP_DR(pl022->phybase),
907 .direction = DMA_TO_DEVICE, 907 .direction = DMA_MEM_TO_DEV,
908 }; 908 };
909 unsigned int pages; 909 unsigned int pages;
910 int ret; 910 int ret;
@@ -1041,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022)
1041 rxdesc = rxchan->device->device_prep_slave_sg(rxchan, 1041 rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
1042 pl022->sgt_rx.sgl, 1042 pl022->sgt_rx.sgl,
1043 rx_sglen, 1043 rx_sglen,
1044 DMA_FROM_DEVICE, 1044 DMA_DEV_TO_MEM,
1045 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1045 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1046 if (!rxdesc) 1046 if (!rxdesc)
1047 goto err_rxdesc; 1047 goto err_rxdesc;
@@ -1049,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022)
1049 txdesc = txchan->device->device_prep_slave_sg(txchan, 1049 txdesc = txchan->device->device_prep_slave_sg(txchan,
1050 pl022->sgt_tx.sgl, 1050 pl022->sgt_tx.sgl,
1051 tx_sglen, 1051 tx_sglen,
1052 DMA_TO_DEVICE, 1052 DMA_MEM_TO_DEV,
1053 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1053 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1054 if (!txdesc) 1054 if (!txdesc)
1055 goto err_txdesc; 1055 goto err_txdesc;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 7086583b910..2a6429d8c36 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1079,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1079 } 1079 }
1080 sg = dma->sg_rx_p; 1080 sg = dma->sg_rx_p;
1081 desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg, 1081 desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
1082 num, DMA_FROM_DEVICE, 1082 num, DMA_DEV_TO_MEM,
1083 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1083 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1084 if (!desc_rx) { 1084 if (!desc_rx) {
1085 dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", 1085 dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1124,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1124 } 1124 }
1125 sg = dma->sg_tx_p; 1125 sg = dma->sg_tx_p;
1126 desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx, 1126 desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
1127 sg, num, DMA_TO_DEVICE, 1127 sg, num, DMA_MEM_TO_DEV,
1128 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1128 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1129 if (!desc_tx) { 1129 if (!desc_tx) {
1130 dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", 1130 dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 8599545cdf9..ac44af165b2 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -27,8 +27,7 @@
27#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
28#include <scsi/iscsi_proto.h> 28#include <scsi/iscsi_proto.h>
29#include <target/target_core_base.h> 29#include <target/target_core_base.h>
30#include <target/target_core_tmr.h> 30#include <target/target_core_fabric.h>
31#include <target/target_core_transport.h>
32 31
33#include "iscsi_target_core.h" 32#include "iscsi_target_core.h"
34#include "iscsi_target_parameters.h" 33#include "iscsi_target_parameters.h"
@@ -284,8 +283,8 @@ static struct iscsi_np *iscsit_get_np(
284 sock_in6 = (struct sockaddr_in6 *)sockaddr; 283 sock_in6 = (struct sockaddr_in6 *)sockaddr;
285 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; 284 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
286 285
287 if (!memcmp((void *)&sock_in6->sin6_addr.in6_u, 286 if (!memcmp(&sock_in6->sin6_addr.in6_u,
288 (void *)&sock_in6_e->sin6_addr.in6_u, 287 &sock_in6_e->sin6_addr.in6_u,
289 sizeof(struct in6_addr))) 288 sizeof(struct in6_addr)))
290 ip_match = 1; 289 ip_match = 1;
291 290
@@ -1225,7 +1224,7 @@ static void iscsit_do_crypto_hash_buf(
1225 1224
1226 crypto_hash_init(hash); 1225 crypto_hash_init(hash);
1227 1226
1228 sg_init_one(&sg, (u8 *)buf, payload_length); 1227 sg_init_one(&sg, buf, payload_length);
1229 crypto_hash_update(hash, &sg, payload_length); 1228 crypto_hash_update(hash, &sg, payload_length);
1230 1229
1231 if (padding) { 1230 if (padding) {
@@ -1603,7 +1602,7 @@ static int iscsit_handle_nop_out(
1603 /* 1602 /*
1604 * Attach ping data to struct iscsi_cmd->buf_ptr. 1603 * Attach ping data to struct iscsi_cmd->buf_ptr.
1605 */ 1604 */
1606 cmd->buf_ptr = (void *)ping_data; 1605 cmd->buf_ptr = ping_data;
1607 cmd->buf_ptr_size = payload_length; 1606 cmd->buf_ptr_size = payload_length;
1608 1607
1609 pr_debug("Got %u bytes of NOPOUT ping" 1608 pr_debug("Got %u bytes of NOPOUT ping"
@@ -3197,7 +3196,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3197 end_of_buf = 1; 3196 end_of_buf = 1;
3198 goto eob; 3197 goto eob;
3199 } 3198 }
3200 memcpy((void *)payload + payload_len, buf, len); 3199 memcpy(payload + payload_len, buf, len);
3201 payload_len += len; 3200 payload_len += len;
3202 3201
3203 spin_lock(&tiqn->tiqn_tpg_lock); 3202 spin_lock(&tiqn->tiqn_tpg_lock);
@@ -3229,7 +3228,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3229 end_of_buf = 1; 3228 end_of_buf = 1;
3230 goto eob; 3229 goto eob;
3231 } 3230 }
3232 memcpy((void *)payload + payload_len, buf, len); 3231 memcpy(payload + payload_len, buf, len);
3233 payload_len += len; 3232 payload_len += len;
3234 } 3233 }
3235 spin_unlock(&tpg->tpg_np_lock); 3234 spin_unlock(&tpg->tpg_np_lock);
@@ -3486,7 +3485,7 @@ int iscsi_target_tx_thread(void *arg)
3486 struct iscsi_conn *conn; 3485 struct iscsi_conn *conn;
3487 struct iscsi_queue_req *qr = NULL; 3486 struct iscsi_queue_req *qr = NULL;
3488 struct se_cmd *se_cmd; 3487 struct se_cmd *se_cmd;
3489 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; 3488 struct iscsi_thread_set *ts = arg;
3490 /* 3489 /*
3491 * Allow ourselves to be interrupted by SIGINT so that a 3490 * Allow ourselves to be interrupted by SIGINT so that a
3492 * connection recovery / failure event can be triggered externally. 3491 * connection recovery / failure event can be triggered externally.
@@ -3775,7 +3774,7 @@ int iscsi_target_rx_thread(void *arg)
3775 u8 buffer[ISCSI_HDR_LEN], opcode; 3774 u8 buffer[ISCSI_HDR_LEN], opcode;
3776 u32 checksum = 0, digest = 0; 3775 u32 checksum = 0, digest = 0;
3777 struct iscsi_conn *conn = NULL; 3776 struct iscsi_conn *conn = NULL;
3778 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; 3777 struct iscsi_thread_set *ts = arg;
3779 struct kvec iov; 3778 struct kvec iov;
3780 /* 3779 /*
3781 * Allow ourselves to be interrupted by SIGINT so that a 3780 * Allow ourselves to be interrupted by SIGINT so that a
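
The cast removals in this file are all instances of one C rule: conversions to and from void * are implicit, so a cast on such an assignment adds noise without changing the generated code. A two-line illustration (get_thread_arg() is hypothetical):

void *arg = get_thread_arg();
struct iscsi_thread_set *ts = arg;	/* implicit conversion, no cast needed */
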
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 1cd6ce373b8..db0cf7c8add 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -82,7 +82,7 @@ static void chap_gen_challenge(
82 unsigned int *c_len) 82 unsigned int *c_len)
83{ 83{
84 unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1]; 84 unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
85 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; 85 struct iscsi_chap *chap = conn->auth_protocol;
86 86
87 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1); 87 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
88 88
@@ -120,7 +120,7 @@ static struct iscsi_chap *chap_server_open(
120 if (!conn->auth_protocol) 120 if (!conn->auth_protocol)
121 return NULL; 121 return NULL;
122 122
123 chap = (struct iscsi_chap *) conn->auth_protocol; 123 chap = conn->auth_protocol;
124 /* 124 /*
125 * We only support MD5 MDA presently. 125 * We only support MD5 MDA presently.
126 */ 126 */
@@ -165,14 +165,15 @@ static int chap_server_compute_md5(
165 unsigned int *nr_out_len) 165 unsigned int *nr_out_len)
166{ 166{
167 char *endptr; 167 char *endptr;
168 unsigned char id, digest[MD5_SIGNATURE_SIZE]; 168 unsigned long id;
169 unsigned char digest[MD5_SIGNATURE_SIZE];
169 unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; 170 unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
170 unsigned char identifier[10], *challenge = NULL; 171 unsigned char identifier[10], *challenge = NULL;
171 unsigned char *challenge_binhex = NULL; 172 unsigned char *challenge_binhex = NULL;
172 unsigned char client_digest[MD5_SIGNATURE_SIZE]; 173 unsigned char client_digest[MD5_SIGNATURE_SIZE];
173 unsigned char server_digest[MD5_SIGNATURE_SIZE]; 174 unsigned char server_digest[MD5_SIGNATURE_SIZE];
174 unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH]; 175 unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
175 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; 176 struct iscsi_chap *chap = conn->auth_protocol;
176 struct crypto_hash *tfm; 177 struct crypto_hash *tfm;
177 struct hash_desc desc; 178 struct hash_desc desc;
178 struct scatterlist sg; 179 struct scatterlist sg;
@@ -246,7 +247,7 @@ static int chap_server_compute_md5(
246 goto out; 247 goto out;
247 } 248 }
248 249
249 sg_init_one(&sg, (void *)&chap->id, 1); 250 sg_init_one(&sg, &chap->id, 1);
250 ret = crypto_hash_update(&desc, &sg, 1); 251 ret = crypto_hash_update(&desc, &sg, 1);
251 if (ret < 0) { 252 if (ret < 0) {
252 pr_err("crypto_hash_update() failed for id\n"); 253 pr_err("crypto_hash_update() failed for id\n");
@@ -254,7 +255,7 @@ static int chap_server_compute_md5(
254 goto out; 255 goto out;
255 } 256 }
256 257
257 sg_init_one(&sg, (void *)&auth->password, strlen(auth->password)); 258 sg_init_one(&sg, &auth->password, strlen(auth->password));
258 ret = crypto_hash_update(&desc, &sg, strlen(auth->password)); 259 ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
259 if (ret < 0) { 260 if (ret < 0) {
260 pr_err("crypto_hash_update() failed for password\n"); 261 pr_err("crypto_hash_update() failed for password\n");
@@ -262,7 +263,7 @@ static int chap_server_compute_md5(
262 goto out; 263 goto out;
263 } 264 }
264 265
265 sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH); 266 sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
266 ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH); 267 ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
267 if (ret < 0) { 268 if (ret < 0) {
268 pr_err("crypto_hash_update() failed for challenge\n"); 269 pr_err("crypto_hash_update() failed for challenge\n");
@@ -305,14 +306,17 @@ static int chap_server_compute_md5(
305 } 306 }
306 307
307 if (type == HEX) 308 if (type == HEX)
308 id = (unsigned char)simple_strtoul((char *)&identifier[2], 309 id = simple_strtoul(&identifier[2], &endptr, 0);
309 &endptr, 0);
310 else 310 else
311 id = (unsigned char)simple_strtoul(identifier, &endptr, 0); 311 id = simple_strtoul(identifier, &endptr, 0);
312 if (id > 255) {
313 pr_err("chap identifier: %lu greater than 255\n", id);
314 goto out;
315 }
312 /* 316 /*
313 * RFC 1994 says the Identifier is no more than one octet (8 bits). 317 * RFC 1994 says the Identifier is no more than one octet (8 bits).
314 */ 318 */
315 pr_debug("[server] Got CHAP_I=%d\n", id); 319 pr_debug("[server] Got CHAP_I=%lu\n", id);
316 /* 320 /*
317 * Get CHAP_C. 321 * Get CHAP_C.
318 */ 322 */
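
The widening of id from unsigned char to unsigned long is what makes the new range check meaningful: with the narrow type the simple_strtoul() result was truncated at assignment, so an out-of-range CHAP_I could never be caught. Illustrative comparison (not part of the patch):

	char *ep;
	unsigned char narrow = (unsigned char)simple_strtoul("0x1ff", &ep, 0);
	unsigned long wide = simple_strtoul("0x1ff", &ep, 0);
	/* narrow == 0xff: the overflow is silently masked */
	/* wide == 0x1ff: the new "id > 255" check rejects it */
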
@@ -351,7 +355,7 @@ static int chap_server_compute_md5(
351 goto out; 355 goto out;
352 } 356 }
353 357
354 sg_init_one(&sg, (void *)&id, 1); 358 sg_init_one(&sg, &id, 1);
355 ret = crypto_hash_update(&desc, &sg, 1); 359 ret = crypto_hash_update(&desc, &sg, 1);
356 if (ret < 0) { 360 if (ret < 0) {
357 pr_err("crypto_hash_update() failed for id\n"); 361 pr_err("crypto_hash_update() failed for id\n");
@@ -359,7 +363,7 @@ static int chap_server_compute_md5(
359 goto out; 363 goto out;
360 } 364 }
361 365
362 sg_init_one(&sg, (void *)auth->password_mutual, 366 sg_init_one(&sg, auth->password_mutual,
363 strlen(auth->password_mutual)); 367 strlen(auth->password_mutual));
364 ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual)); 368 ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
365 if (ret < 0) { 369 if (ret < 0) {
@@ -371,7 +375,7 @@ static int chap_server_compute_md5(
371 /* 375 /*
372 * Hash the received challenge (binary form of the ASCII-hex string). 376 * Hash the received challenge (binary form of the ASCII-hex string).
373 */ 377 */
374 sg_init_one(&sg, (void *)challenge_binhex, challenge_len); 378 sg_init_one(&sg, challenge_binhex, challenge_len);
375 ret = crypto_hash_update(&desc, &sg, challenge_len); 379 ret = crypto_hash_update(&desc, &sg, challenge_len);
376 if (ret < 0) { 380 if (ret < 0) {
377 pr_err("crypto_hash_update() failed for mutual auth challenge\n"); 381 pr_err("crypto_hash_update() failed for mutual auth challenge\n");
@@ -414,7 +418,7 @@ static int chap_got_response(
414 char *nr_out_ptr, 418 char *nr_out_ptr,
415 unsigned int *nr_out_len) 419 unsigned int *nr_out_len)
416{ 420{
417 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; 421 struct iscsi_chap *chap = conn->auth_protocol;
418 422
419 switch (chap->digest_type) { 423 switch (chap->digest_type) {
420 case CHAP_DIGEST_MD5: 424 case CHAP_DIGEST_MD5:
@@ -437,7 +441,7 @@ u32 chap_main_loop(
437 int *in_len, 441 int *in_len,
438 int *out_len) 442 int *out_len)
439{ 443{
440 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; 444 struct iscsi_chap *chap = conn->auth_protocol;
441 445
442 if (!chap) { 446 if (!chap) {
443 chap = chap_server_open(conn, auth, in_text, out_text, out_len); 447 chap = chap_server_open(conn, auth, in_text, out_text, out_len);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index db327845e46..3468caab47a 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -22,12 +22,8 @@
22#include <linux/configfs.h> 22#include <linux/configfs.h>
23#include <linux/export.h> 23#include <linux/export.h>
24#include <target/target_core_base.h> 24#include <target/target_core_base.h>
25#include <target/target_core_transport.h> 25#include <target/target_core_fabric.h>
26#include <target/target_core_fabric_ops.h>
27#include <target/target_core_fabric_configfs.h> 26#include <target/target_core_fabric_configfs.h>
28#include <target/target_core_fabric_lib.h>
29#include <target/target_core_device.h>
30#include <target/target_core_tpg.h>
31#include <target/target_core_configfs.h> 27#include <target/target_core_configfs.h>
32#include <target/configfs_macros.h> 28#include <target/configfs_macros.h>
33 29
@@ -56,8 +52,7 @@ struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
56{ 52{
57 struct se_portal_group *se_tpg = container_of(to_config_group(item), 53 struct se_portal_group *se_tpg = container_of(to_config_group(item),
58 struct se_portal_group, tpg_group); 54 struct se_portal_group, tpg_group);
59 struct iscsi_portal_group *tpg = 55 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
60 (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
61 int ret; 56 int ret;
62 57
63 if (!tpg) { 58 if (!tpg) {
@@ -1225,7 +1220,7 @@ struct se_portal_group *lio_target_tiqn_addtpg(
1225 1220
1226 ret = core_tpg_register( 1221 ret = core_tpg_register(
1227 &lio_target_fabric_configfs->tf_ops, 1222 &lio_target_fabric_configfs->tf_ops,
1228 wwn, &tpg->tpg_se_tpg, (void *)tpg, 1223 wwn, &tpg->tpg_se_tpg, tpg,
1229 TRANSPORT_TPG_TYPE_NORMAL); 1224 TRANSPORT_TPG_TYPE_NORMAL);
1230 if (ret < 0) 1225 if (ret < 0)
1231 return NULL; 1226 return NULL;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index a19fa5eea88..f63ea35bc4a 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -21,8 +21,7 @@
21 21
22#include <scsi/scsi_device.h> 22#include <scsi/scsi_device.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_device.h> 24#include <target/target_core_fabric.h>
25#include <target/target_core_transport.h>
26 25
27#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
28#include "iscsi_target_device.h" 27#include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b7ffc3cd40c..478451167b6 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -21,7 +21,7 @@
21 21
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_transport.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h" 27#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 101b1beb3bc..255c0d67e89 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -21,7 +21,7 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_transport.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h" 27#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 0b8404c3012..1af1f21af21 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -21,7 +21,7 @@
21 21
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_transport.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_datain_values.h" 27#include "iscsi_target_datain_values.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index d734bdec24f..373b0cc6abd 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -23,7 +23,7 @@
23#include <linux/crypto.h> 23#include <linux/crypto.h>
24#include <scsi/iscsi_proto.h> 24#include <scsi/iscsi_proto.h>
25#include <target/target_core_base.h> 25#include <target/target_core_base.h>
26#include <target/target_core_transport.h> 26#include <target/target_core_fabric.h>
27 27
28#include "iscsi_target_core.h" 28#include "iscsi_target_core.h"
29#include "iscsi_target_tq.h" 29#include "iscsi_target_tq.h"
@@ -143,7 +143,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
143 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, 143 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
144 sess_list) { 144 sess_list) {
145 145
146 sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; 146 sess_p = se_sess->fabric_sess_ptr;
147 spin_lock(&sess_p->conn_lock); 147 spin_lock(&sess_p->conn_lock);
148 if (atomic_read(&sess_p->session_fall_back_to_erl0) || 148 if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
149 atomic_read(&sess_p->session_logout) || 149 atomic_read(&sess_p->session_logout) ||
@@ -151,9 +151,9 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
151 spin_unlock(&sess_p->conn_lock); 151 spin_unlock(&sess_p->conn_lock);
152 continue; 152 continue;
153 } 153 }
154 if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) && 154 if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
155 (!strcmp((void *)sess_p->sess_ops->InitiatorName, 155 (!strcmp(sess_p->sess_ops->InitiatorName,
156 (void *)initiatorname_param->value) && 156 initiatorname_param->value) &&
157 (sess_p->sess_ops->SessionType == sessiontype))) { 157 (sess_p->sess_ops->SessionType == sessiontype))) {
158 atomic_set(&sess_p->session_reinstatement, 1); 158 atomic_set(&sess_p->session_reinstatement, 1);
159 spin_unlock(&sess_p->conn_lock); 159 spin_unlock(&sess_p->conn_lock);
@@ -229,7 +229,7 @@ static int iscsi_login_zero_tsih_s1(
229 229
230 iscsi_login_set_conn_values(sess, conn, pdu->cid); 230 iscsi_login_set_conn_values(sess, conn, pdu->cid);
231 sess->init_task_tag = pdu->itt; 231 sess->init_task_tag = pdu->itt;
232 memcpy((void *)&sess->isid, (void *)pdu->isid, 6); 232 memcpy(&sess->isid, pdu->isid, 6);
233 sess->exp_cmd_sn = pdu->cmdsn; 233 sess->exp_cmd_sn = pdu->cmdsn;
234 INIT_LIST_HEAD(&sess->sess_conn_list); 234 INIT_LIST_HEAD(&sess->sess_conn_list);
235 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list); 235 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
@@ -440,8 +440,7 @@ static int iscsi_login_non_zero_tsih_s2(
440 atomic_read(&sess_p->session_logout) || 440 atomic_read(&sess_p->session_logout) ||
441 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) 441 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
442 continue; 442 continue;
443 if (!memcmp((const void *)sess_p->isid, 443 if (!memcmp(sess_p->isid, pdu->isid, 6) &&
444 (const void *)pdu->isid, 6) &&
445 (sess_p->tsih == pdu->tsih)) { 444 (sess_p->tsih == pdu->tsih)) {
446 iscsit_inc_session_usage_count(sess_p); 445 iscsit_inc_session_usage_count(sess_p);
447 iscsit_stop_time2retain_timer(sess_p); 446 iscsit_stop_time2retain_timer(sess_p);
@@ -654,7 +653,7 @@ static int iscsi_post_login_handler(
654 653
655 spin_lock_bh(&se_tpg->session_lock); 654 spin_lock_bh(&se_tpg->session_lock);
656 __transport_register_session(&sess->tpg->tpg_se_tpg, 655 __transport_register_session(&sess->tpg->tpg_se_tpg,
657 se_sess->se_node_acl, se_sess, (void *)sess); 656 se_sess->se_node_acl, se_sess, sess);
658 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n"); 657 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
659 sess->session_state = TARG_SESS_STATE_LOGGED_IN; 658 sess->session_state = TARG_SESS_STATE_LOGGED_IN;
660 659
@@ -811,7 +810,7 @@ int iscsi_target_setup_login_socket(
811 * Setup the np->np_sockaddr from the passed sockaddr setup 810 * Setup the np->np_sockaddr from the passed sockaddr setup
812 * in iscsi_target_configfs.c code. 811 * in iscsi_target_configfs.c code.
813 */ 812 */
814 memcpy((void *)&np->np_sockaddr, (void *)sockaddr, 813 memcpy(&np->np_sockaddr, sockaddr,
815 sizeof(struct __kernel_sockaddr_storage)); 814 sizeof(struct __kernel_sockaddr_storage));
816 815
817 if (sockaddr->ss_family == AF_INET6) 816 if (sockaddr->ss_family == AF_INET6)
@@ -821,6 +820,7 @@ int iscsi_target_setup_login_socket(
821 /* 820 /*
822 * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY. 821 * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY.
823 */ 822 */
823 /* FIXME: Someone please explain why this is endian-safe */
824 opt = 1; 824 opt = 1;
825 if (np->np_network_transport == ISCSI_TCP) { 825 if (np->np_network_transport == ISCSI_TCP) {
826 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, 826 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
@@ -832,6 +832,7 @@ int iscsi_target_setup_login_socket(
832 } 832 }
833 } 833 }
834 834
835 /* FIXME: Someone please explain why this is endian-safe */
835 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, 836 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
836 (char *)&opt, sizeof(opt)); 837 (char *)&opt, sizeof(opt));
837 if (ret < 0) { 838 if (ret < 0) {
@@ -1206,7 +1207,7 @@ out:
1206 1207
1207int iscsi_target_login_thread(void *arg) 1208int iscsi_target_login_thread(void *arg)
1208{ 1209{
1209 struct iscsi_np *np = (struct iscsi_np *)arg; 1210 struct iscsi_np *np = arg;
1210 int ret; 1211 int ret;
1211 1212
1212 allow_signal(SIGINT); 1213 allow_signal(SIGINT);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 98936cb7c29..e89fa745725 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -21,7 +21,7 @@
21#include <linux/ctype.h> 21#include <linux/ctype.h>
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_tpg.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_parameters.h" 27#include "iscsi_target_parameters.h"
@@ -732,7 +732,7 @@ static void iscsi_initiatorname_tolower(
732 u32 iqn_size = strlen(param_buf), i; 732 u32 iqn_size = strlen(param_buf), i;
733 733
734 for (i = 0; i < iqn_size; i++) { 734 for (i = 0; i < iqn_size; i++) {
735 c = (char *)&param_buf[i]; 735 c = &param_buf[i];
736 if (!isupper(*c)) 736 if (!isupper(*c))
737 continue; 737 continue;
738 738
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index aeafbe0cd7d..b3c699c4fe8 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -19,7 +19,6 @@
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21#include <target/target_core_base.h> 21#include <target/target_core_base.h>
22#include <target/target_core_transport.h>
23 22
24#include "iscsi_target_core.h" 23#include "iscsi_target_core.h"
25#include "iscsi_target_device.h" 24#include "iscsi_target_device.h"
@@ -135,7 +134,7 @@ extern int iscsit_na_nopin_timeout(
135 spin_lock_bh(&se_nacl->nacl_sess_lock); 134 spin_lock_bh(&se_nacl->nacl_sess_lock);
136 se_sess = se_nacl->nacl_sess; 135 se_sess = se_nacl->nacl_sess;
137 if (se_sess) { 136 if (se_sess) {
138 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 137 sess = se_sess->fabric_sess_ptr;
139 138
140 spin_lock(&sess->conn_lock); 139 spin_lock(&sess->conn_lock);
141 list_for_each_entry(conn, &sess->sess_conn_list, 140 list_for_each_entry(conn, &sess->sess_conn_list,
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f1db83077e0..421d6947dc6 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -23,7 +23,6 @@
23#include <linux/export.h> 23#include <linux/export.h>
24#include <scsi/iscsi_proto.h> 24#include <scsi/iscsi_proto.h>
25#include <target/target_core_base.h> 25#include <target/target_core_base.h>
26#include <target/target_core_transport.h>
27#include <target/configfs_macros.h> 26#include <target/configfs_macros.h>
28 27
29#include "iscsi_target_core.h" 28#include "iscsi_target_core.h"
@@ -746,7 +745,7 @@ static ssize_t iscsi_stat_sess_show_attr_node(
746 spin_lock_bh(&se_nacl->nacl_sess_lock); 745 spin_lock_bh(&se_nacl->nacl_sess_lock);
747 se_sess = se_nacl->nacl_sess; 746 se_sess = se_nacl->nacl_sess;
748 if (se_sess) { 747 if (se_sess) {
749 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 748 sess = se_sess->fabric_sess_ptr;
750 if (sess) 749 if (sess)
751 ret = snprintf(page, PAGE_SIZE, "%u\n", 750 ret = snprintf(page, PAGE_SIZE, "%u\n",
752 sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX); 751 sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
@@ -770,7 +769,7 @@ static ssize_t iscsi_stat_sess_show_attr_indx(
770 spin_lock_bh(&se_nacl->nacl_sess_lock); 769 spin_lock_bh(&se_nacl->nacl_sess_lock);
771 se_sess = se_nacl->nacl_sess; 770 se_sess = se_nacl->nacl_sess;
772 if (se_sess) { 771 if (se_sess) {
773 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 772 sess = se_sess->fabric_sess_ptr;
774 if (sess) 773 if (sess)
775 ret = snprintf(page, PAGE_SIZE, "%u\n", 774 ret = snprintf(page, PAGE_SIZE, "%u\n",
776 sess->session_index); 775 sess->session_index);
@@ -794,7 +793,7 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
794 spin_lock_bh(&se_nacl->nacl_sess_lock); 793 spin_lock_bh(&se_nacl->nacl_sess_lock);
795 se_sess = se_nacl->nacl_sess; 794 se_sess = se_nacl->nacl_sess;
796 if (se_sess) { 795 if (se_sess) {
797 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 796 sess = se_sess->fabric_sess_ptr;
798 if (sess) 797 if (sess)
799 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); 798 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
800 } 799 }
@@ -817,7 +816,7 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
817 spin_lock_bh(&se_nacl->nacl_sess_lock); 816 spin_lock_bh(&se_nacl->nacl_sess_lock);
818 se_sess = se_nacl->nacl_sess; 817 se_sess = se_nacl->nacl_sess;
819 if (se_sess) { 818 if (se_sess) {
820 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 819 sess = se_sess->fabric_sess_ptr;
821 if (sess) 820 if (sess)
822 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); 821 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
823 } 822 }
@@ -840,7 +839,7 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
840 spin_lock_bh(&se_nacl->nacl_sess_lock); 839 spin_lock_bh(&se_nacl->nacl_sess_lock);
841 se_sess = se_nacl->nacl_sess; 840 se_sess = se_nacl->nacl_sess;
842 if (se_sess) { 841 if (se_sess) {
843 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 842 sess = se_sess->fabric_sess_ptr;
844 if (sess) 843 if (sess)
845 ret = snprintf(page, PAGE_SIZE, "%llu\n", 844 ret = snprintf(page, PAGE_SIZE, "%llu\n",
846 (unsigned long long)sess->tx_data_octets); 845 (unsigned long long)sess->tx_data_octets);
@@ -864,7 +863,7 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
864 spin_lock_bh(&se_nacl->nacl_sess_lock); 863 spin_lock_bh(&se_nacl->nacl_sess_lock);
865 se_sess = se_nacl->nacl_sess; 864 se_sess = se_nacl->nacl_sess;
866 if (se_sess) { 865 if (se_sess) {
867 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 866 sess = se_sess->fabric_sess_ptr;
868 if (sess) 867 if (sess)
869 ret = snprintf(page, PAGE_SIZE, "%llu\n", 868 ret = snprintf(page, PAGE_SIZE, "%llu\n",
870 (unsigned long long)sess->rx_data_octets); 869 (unsigned long long)sess->rx_data_octets);
@@ -888,7 +887,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
888 spin_lock_bh(&se_nacl->nacl_sess_lock); 887 spin_lock_bh(&se_nacl->nacl_sess_lock);
889 se_sess = se_nacl->nacl_sess; 888 se_sess = se_nacl->nacl_sess;
890 if (se_sess) { 889 if (se_sess) {
891 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 890 sess = se_sess->fabric_sess_ptr;
892 if (sess) 891 if (sess)
893 ret = snprintf(page, PAGE_SIZE, "%u\n", 892 ret = snprintf(page, PAGE_SIZE, "%u\n",
894 sess->conn_digest_errors); 893 sess->conn_digest_errors);
@@ -912,7 +911,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
912 spin_lock_bh(&se_nacl->nacl_sess_lock); 911 spin_lock_bh(&se_nacl->nacl_sess_lock);
913 se_sess = se_nacl->nacl_sess; 912 se_sess = se_nacl->nacl_sess;
914 if (se_sess) { 913 if (se_sess) {
915 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 914 sess = se_sess->fabric_sess_ptr;
916 if (sess) 915 if (sess)
917 ret = snprintf(page, PAGE_SIZE, "%u\n", 916 ret = snprintf(page, PAGE_SIZE, "%u\n",
918 sess->conn_timeout_errors); 917 sess->conn_timeout_errors);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 490207eacde..255ed35da81 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -21,7 +21,7 @@
21#include <asm/unaligned.h> 21#include <asm/unaligned.h>
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_transport.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h" 27#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index d4cf2cd25c4..879d8d0fa3f 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -19,10 +19,8 @@
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21#include <target/target_core_base.h> 21#include <target/target_core_base.h>
22#include <target/target_core_transport.h> 22#include <target/target_core_fabric.h>
23#include <target/target_core_fabric_ops.h>
24#include <target/target_core_configfs.h> 23#include <target/target_core_configfs.h>
25#include <target/target_core_tpg.h>
26 24
27#include "iscsi_target_core.h" 25#include "iscsi_target_core.h"
28#include "iscsi_target_erl0.h" 26#include "iscsi_target_erl0.h"
@@ -72,7 +70,7 @@ int iscsit_load_discovery_tpg(void)
72 70
73 ret = core_tpg_register( 71 ret = core_tpg_register(
74 &lio_target_fabric_configfs->tf_ops, 72 &lio_target_fabric_configfs->tf_ops,
75 NULL, &tpg->tpg_se_tpg, (void *)tpg, 73 NULL, &tpg->tpg_se_tpg, tpg,
76 TRANSPORT_TPG_TYPE_DISCOVERY); 74 TRANSPORT_TPG_TYPE_DISCOVERY);
77 if (ret < 0) { 75 if (ret < 0) {
78 kfree(tpg); 76 kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 02348f727bd..a05ca1c4f01 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -22,9 +22,7 @@
22#include <scsi/scsi_tcq.h> 22#include <scsi/scsi_tcq.h>
23#include <scsi/iscsi_proto.h> 23#include <scsi/iscsi_proto.h>
24#include <target/target_core_base.h> 24#include <target/target_core_base.h>
25#include <target/target_core_transport.h> 25#include <target/target_core_fabric.h>
26#include <target/target_core_tmr.h>
27#include <target/target_core_fabric_ops.h>
28#include <target/target_core_configfs.h> 26#include <target/target_core_configfs.h>
29 27
30#include "iscsi_target_core.h" 28#include "iscsi_target_core.h"
@@ -289,7 +287,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
289 } 287 }
290 288
291 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, 289 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
292 (void *)cmd->tmr_req, tcm_function, 290 cmd->tmr_req, tcm_function,
293 GFP_KERNEL); 291 GFP_KERNEL);
294 if (!se_cmd->se_tmr_req) 292 if (!se_cmd->se_tmr_req)
295 goto out; 293 goto out;
@@ -1066,7 +1064,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
1066 if (tiqn) { 1064 if (tiqn) {
1067 spin_lock_bh(&tiqn->sess_err_stats.lock); 1065 spin_lock_bh(&tiqn->sess_err_stats.lock);
1068 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name, 1066 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
1069 (void *)conn->sess->sess_ops->InitiatorName); 1067 conn->sess->sess_ops->InitiatorName);
1070 tiqn->sess_err_stats.last_sess_failure_type = 1068 tiqn->sess_err_stats.last_sess_failure_type =
1071 ISCSI_SESS_ERR_CXN_TIMEOUT; 1069 ISCSI_SESS_ERR_CXN_TIMEOUT;
1072 tiqn->sess_err_stats.cxn_timeout_errors++; 1070 tiqn->sess_err_stats.cxn_timeout_errors++;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 81d5832fbbd..c47ff7f59e5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -33,14 +33,9 @@
33#include <scsi/scsi_cmnd.h> 33#include <scsi/scsi_cmnd.h>
34 34
35#include <target/target_core_base.h> 35#include <target/target_core_base.h>
36#include <target/target_core_transport.h> 36#include <target/target_core_fabric.h>
37#include <target/target_core_fabric_ops.h>
38#include <target/target_core_fabric_configfs.h> 37#include <target/target_core_fabric_configfs.h>
39#include <target/target_core_fabric_lib.h>
40#include <target/target_core_configfs.h> 38#include <target/target_core_configfs.h>
41#include <target/target_core_device.h>
42#include <target/target_core_tpg.h>
43#include <target/target_core_tmr.h>
44 39
45#include "tcm_loop.h" 40#include "tcm_loop.h"
46 41
@@ -421,11 +416,11 @@ static struct scsi_host_template tcm_loop_driver_template = {
421 .queuecommand = tcm_loop_queuecommand, 416 .queuecommand = tcm_loop_queuecommand,
422 .change_queue_depth = tcm_loop_change_queue_depth, 417 .change_queue_depth = tcm_loop_change_queue_depth,
423 .eh_device_reset_handler = tcm_loop_device_reset, 418 .eh_device_reset_handler = tcm_loop_device_reset,
424 .can_queue = TL_SCSI_CAN_QUEUE, 419 .can_queue = 1024,
425 .this_id = -1, 420 .this_id = -1,
426 .sg_tablesize = TL_SCSI_SG_TABLESIZE, 421 .sg_tablesize = 256,
427 .cmd_per_lun = TL_SCSI_CMD_PER_LUN, 422 .cmd_per_lun = 1024,
428 .max_sectors = TL_SCSI_MAX_SECTORS, 423 .max_sectors = 0xFFFF,
429 .use_clustering = DISABLE_CLUSTERING, 424 .use_clustering = DISABLE_CLUSTERING,
430 .slave_alloc = tcm_loop_slave_alloc, 425 .slave_alloc = tcm_loop_slave_alloc,
431 .slave_configure = tcm_loop_slave_configure, 426 .slave_configure = tcm_loop_slave_configure,
@@ -564,8 +559,7 @@ static char *tcm_loop_get_fabric_name(void)
564 559
565static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) 560static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
566{ 561{
567 struct tcm_loop_tpg *tl_tpg = 562 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
568 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
569 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 563 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
570 /* 564 /*
571 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba() 565 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
@@ -592,8 +586,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
592 586
593static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) 587static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
594{ 588{
595 struct tcm_loop_tpg *tl_tpg = 589 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
596 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
597 /* 590 /*
598 * Return the passed NAA identifier for the SAS Target Port 591 * Return the passed NAA identifier for the SAS Target Port
599 */ 592 */
@@ -602,8 +595,7 @@ static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
602 595
603static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) 596static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
604{ 597{
605 struct tcm_loop_tpg *tl_tpg = 598 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
606 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
607 /* 599 /*
608 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 600 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
609 * to represent the SCSI Target Port. 601 * to represent the SCSI Target Port.
@@ -623,8 +615,7 @@ static u32 tcm_loop_get_pr_transport_id(
623 int *format_code, 615 int *format_code,
624 unsigned char *buf) 616 unsigned char *buf)
625{ 617{
626 struct tcm_loop_tpg *tl_tpg = 618 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
627 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
628 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 619 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
629 620
630 switch (tl_hba->tl_proto_id) { 621 switch (tl_hba->tl_proto_id) {
@@ -653,8 +644,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
653 struct t10_pr_registration *pr_reg, 644 struct t10_pr_registration *pr_reg,
654 int *format_code) 645 int *format_code)
655{ 646{
656 struct tcm_loop_tpg *tl_tpg = 647 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
657 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
658 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 648 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
659 649
660 switch (tl_hba->tl_proto_id) { 650 switch (tl_hba->tl_proto_id) {
@@ -687,8 +677,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
687 u32 *out_tid_len, 677 u32 *out_tid_len,
688 char **port_nexus_ptr) 678 char **port_nexus_ptr)
689{ 679{
690 struct tcm_loop_tpg *tl_tpg = 680 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
691 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
692 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 681 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
693 682
694 switch (tl_hba->tl_proto_id) { 683 switch (tl_hba->tl_proto_id) {
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 6b76c7a22bb..15a03644147 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,16 +1,7 @@
1#define TCM_LOOP_VERSION "v2.1-rc1" 1#define TCM_LOOP_VERSION "v2.1-rc1"
2#define TL_WWN_ADDR_LEN 256 2#define TL_WWN_ADDR_LEN 256
3#define TL_TPGS_PER_HBA 32 3#define TL_TPGS_PER_HBA 32
4/* 4
5 * Defaults for struct scsi_host_template tcm_loop_driver_template
6 *
7 * We use large can_queue and cmd_per_lun here and let TCM enforce
8 * the underlying se_device_t->queue_depth.
9 */
10#define TL_SCSI_CAN_QUEUE 1024
11#define TL_SCSI_CMD_PER_LUN 1024
12#define TL_SCSI_MAX_SECTORS 1024
13#define TL_SCSI_SG_TABLESIZE 256
14/* 5/*
15 * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len 6 * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
16 */ 7 */
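
Worth flagging across this hunk pair: the literals substituted into tcm_loop_driver_template match the deleted TL_SCSI_* defaults for can_queue (1024), cmd_per_lun (1024) and sg_tablesize (256), but max_sectors moves from TL_SCSI_MAX_SECTORS (1024) to 0xFFFF, so a small behavior change rides along with the cleanup.
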
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 1dcbef499d6..1b1edd14f4b 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -32,13 +32,12 @@
32#include <scsi/scsi_cmnd.h> 32#include <scsi/scsi_cmnd.h>
33 33
34#include <target/target_core_base.h> 34#include <target/target_core_base.h>
35#include <target/target_core_device.h> 35#include <target/target_core_backend.h>
36#include <target/target_core_transport.h> 36#include <target/target_core_fabric.h>
37#include <target/target_core_fabric_ops.h>
38#include <target/target_core_configfs.h> 37#include <target/target_core_configfs.h>
39 38
39#include "target_core_internal.h"
40#include "target_core_alua.h" 40#include "target_core_alua.h"
41#include "target_core_hba.h"
42#include "target_core_ua.h" 41#include "target_core_ua.h"
43 42
44static int core_alua_check_transition(int state, int *primary); 43static int core_alua_check_transition(int state, int *primary);
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 831468b3163..2f2235edeff 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -29,10 +29,11 @@
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include <target/target_core_base.h> 31#include <target/target_core_base.h>
32#include <target/target_core_transport.h> 32#include <target/target_core_backend.h>
33#include <target/target_core_fabric_ops.h> 33#include <target/target_core_fabric.h>
34
35#include "target_core_internal.h"
34#include "target_core_ua.h" 36#include "target_core_ua.h"
35#include "target_core_cdb.h"
36 37
37static void 38static void
38target_fill_alua_data(struct se_port *port, unsigned char *buf) 39target_fill_alua_data(struct se_port *port, unsigned char *buf)
@@ -94,6 +95,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
94 buf[2] = dev->transport->get_device_rev(dev); 95 buf[2] = dev->transport->get_device_rev(dev);
95 96
96 /* 97 /*
98 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
99 *
100 * SPC4 says:
101 * A RESPONSE DATA FORMAT field set to 2h indicates that the
102 * standard INQUIRY data is in the format defined in this
103 * standard. Response data format values less than 2h are
104 * obsolete. Response data format values greater than 2h are
105 * reserved.
106 */
107 buf[3] = 2;
108
109 /*
97 * Enable SCCS and TPGS fields for Emulated ALUA 110 * Enable SCCS and TPGS fields for Emulated ALUA
98 */ 111 */
99 if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) 112 if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
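
For orientation, a hedged sketch of the standard INQUIRY header bytes this function ends up filling in, condensed from the surrounding hunks (offsets per SPC-4; the helper name is hypothetical):

	static void sketch_std_inquiry_header(struct se_device *dev,
					      unsigned char *buf)
	{
		buf[2] = dev->transport->get_device_rev(dev);	/* VERSION */
		buf[3] = 2;	/* RESPONSE DATA FORMAT = 2, NORMACA/HISUP = 0 */
		buf[4] = 31;	/* ADDITIONAL LENGTH */
		snprintf(&buf[8], 8, "LIO-ORG");	/* T10 VENDOR IDENTIFICATION */
	}
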
@@ -115,11 +128,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
115 goto out; 128 goto out;
116 } 129 }
117 130
118 snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); 131 snprintf(&buf[8], 8, "LIO-ORG");
119 snprintf((unsigned char *)&buf[16], 16, "%s", 132 snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
120 &dev->se_sub_dev->t10_wwn.model[0]); 133 snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
121 snprintf((unsigned char *)&buf[32], 4, "%s",
122 &dev->se_sub_dev->t10_wwn.revision[0]);
123 buf[4] = 31; /* Set additional length to 31 */ 134 buf[4] = 31; /* Set additional length to 31 */
124 135
125out: 136out:
@@ -138,8 +149,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
138 SDF_EMULATED_VPD_UNIT_SERIAL) { 149 SDF_EMULATED_VPD_UNIT_SERIAL) {
139 u32 unit_serial_len; 150 u32 unit_serial_len;
140 151
141 unit_serial_len = 152 unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
142 strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
143 unit_serial_len++; /* For NULL Terminator */ 153 unit_serial_len++; /* For NULL Terminator */
144 154
145 if (((len + 4) + unit_serial_len) > cmd->data_length) { 155 if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -148,8 +158,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
148 buf[3] = (len & 0xff); 158 buf[3] = (len & 0xff);
149 return 0; 159 return 0;
150 } 160 }
151 len += sprintf((unsigned char *)&buf[4], "%s", 161 len += sprintf(&buf[4], "%s",
152 &dev->se_sub_dev->t10_wwn.unit_serial[0]); 162 dev->se_sub_dev->t10_wwn.unit_serial);
153 len++; /* Extra Byte for NULL Terminator */ 163 len++; /* Extra Byte for NULL Terminator */
154 buf[3] = len; 164 buf[3] = len;
155 } 165 }
@@ -279,14 +289,13 @@ check_t10_vend_desc:
279 len += (prod_len + unit_serial_len); 289 len += (prod_len + unit_serial_len);
280 goto check_port; 290 goto check_port;
281 } 291 }
282 id_len += sprintf((unsigned char *)&buf[off+12], 292 id_len += sprintf(&buf[off+12], "%s:%s", prod,
283 "%s:%s", prod,
284 &dev->se_sub_dev->t10_wwn.unit_serial[0]); 293 &dev->se_sub_dev->t10_wwn.unit_serial[0]);
285 } 294 }
286 buf[off] = 0x2; /* ASCII */ 295 buf[off] = 0x2; /* ASCII */
287 buf[off+1] = 0x1; /* T10 Vendor ID */ 296 buf[off+1] = 0x1; /* T10 Vendor ID */
288 buf[off+2] = 0x0; 297 buf[off+2] = 0x0;
289 memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8); 298 memcpy(&buf[off+4], "LIO-ORG", 8);
290 /* Extra Byte for NULL Terminator */ 299 /* Extra Byte for NULL Terminator */
291 id_len++; 300 id_len++;
292 /* Identifier Length */ 301 /* Identifier Length */
diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h
deleted file mode 100644
index ad6b1e39300..00000000000
--- a/drivers/target/target_core_cdb.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef TARGET_CORE_CDB_H
2#define TARGET_CORE_CDB_H
3
4int target_emulate_inquiry(struct se_task *task);
5int target_emulate_readcapacity(struct se_task *task);
6int target_emulate_readcapacity_16(struct se_task *task);
7int target_emulate_modesense(struct se_task *task);
8int target_emulate_request_sense(struct se_task *task);
9int target_emulate_unmap(struct se_task *task);
10int target_emulate_write_same(struct se_task *task);
11int target_emulate_synchronize_cache(struct se_task *task);
12int target_emulate_noop(struct se_task *task);
13
14#endif /* TARGET_CORE_CDB_H */
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 93d4f6a1b79..0955bb8979f 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -39,18 +39,16 @@
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40 40
41#include <target/target_core_base.h> 41#include <target/target_core_base.h>
42#include <target/target_core_device.h> 42#include <target/target_core_backend.h>
43#include <target/target_core_transport.h> 43#include <target/target_core_fabric.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_fabric_configfs.h> 44#include <target/target_core_fabric_configfs.h>
46#include <target/target_core_configfs.h> 45#include <target/target_core_configfs.h>
47#include <target/configfs_macros.h> 46#include <target/configfs_macros.h>
48 47
48#include "target_core_internal.h"
49#include "target_core_alua.h" 49#include "target_core_alua.h"
50#include "target_core_hba.h"
51#include "target_core_pr.h" 50#include "target_core_pr.h"
52#include "target_core_rd.h" 51#include "target_core_rd.h"
53#include "target_core_stat.h"
54 52
55extern struct t10_alua_lu_gp *default_lu_gp; 53extern struct t10_alua_lu_gp *default_lu_gp;
56 54
@@ -1452,7 +1450,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1452 return -ENOMEM; 1450 return -ENOMEM;
1453 1451
1454 orig = opts; 1452 orig = opts;
1455 while ((ptr = strsep(&opts, ",")) != NULL) { 1453 while ((ptr = strsep(&opts, ",\n")) != NULL) {
1456 if (!*ptr) 1454 if (!*ptr)
1457 continue; 1455 continue;
1458 1456
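
The delimiter change from "," to ",\n" makes strsep() also cut the trailing newline that an echo into configfs appends, so the last token no longer carries a stray '\n'; the same change recurs in target_core_file.c and target_core_iblock.c below. A standalone sketch (token names invented for illustration):

	char buf[] = "initiator_fabric=iSCSI,mapped_lun=0\n";
	char *opts = buf, *ptr;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;	/* skips the empty token after '\n' */
		pr_debug("token: %s\n", ptr);
	}
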
@@ -1631,7 +1629,7 @@ static struct config_item_type target_core_dev_pr_cit = {
1631 1629
1632static ssize_t target_core_show_dev_info(void *p, char *page) 1630static ssize_t target_core_show_dev_info(void *p, char *page)
1633{ 1631{
1634 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1632 struct se_subsystem_dev *se_dev = p;
1635 struct se_hba *hba = se_dev->se_dev_hba; 1633 struct se_hba *hba = se_dev->se_dev_hba;
1636 struct se_subsystem_api *t = hba->transport; 1634 struct se_subsystem_api *t = hba->transport;
1637 int bl = 0; 1635 int bl = 0;
@@ -1659,7 +1657,7 @@ static ssize_t target_core_store_dev_control(
1659 const char *page, 1657 const char *page,
1660 size_t count) 1658 size_t count)
1661{ 1659{
1662 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1660 struct se_subsystem_dev *se_dev = p;
1663 struct se_hba *hba = se_dev->se_dev_hba; 1661 struct se_hba *hba = se_dev->se_dev_hba;
1664 struct se_subsystem_api *t = hba->transport; 1662 struct se_subsystem_api *t = hba->transport;
1665 1663
@@ -1682,7 +1680,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
1682 1680
1683static ssize_t target_core_show_dev_alias(void *p, char *page) 1681static ssize_t target_core_show_dev_alias(void *p, char *page)
1684{ 1682{
1685 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1683 struct se_subsystem_dev *se_dev = p;
1686 1684
1687 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS)) 1685 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
1688 return 0; 1686 return 0;
@@ -1695,7 +1693,7 @@ static ssize_t target_core_store_dev_alias(
1695 const char *page, 1693 const char *page,
1696 size_t count) 1694 size_t count)
1697{ 1695{
1698 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1696 struct se_subsystem_dev *se_dev = p;
1699 struct se_hba *hba = se_dev->se_dev_hba; 1697 struct se_hba *hba = se_dev->se_dev_hba;
1700 ssize_t read_bytes; 1698 ssize_t read_bytes;
1701 1699
@@ -1710,6 +1708,9 @@ static ssize_t target_core_store_dev_alias(
1710 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1708 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
1711 "%s", page); 1709 "%s", page);
1712 1710
1711 if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
1712 se_dev->se_dev_alias[read_bytes - 1] = '\0';
1713
1713 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 1714 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
1714 config_item_name(&hba->hba_group.cg_item), 1715 config_item_name(&hba->hba_group.cg_item),
1715 config_item_name(&se_dev->se_dev_group.cg_item), 1716 config_item_name(&se_dev->se_dev_group.cg_item),
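
The newline strip added here (and in the udev_path store below) indexes the buffer with the raw snprintf() return value; strictly, snprintf() returns the length the output would have had before truncation, so a fully defensive variant clamps first. A hedged sketch of that variant (not what the patch does):

	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
			"%s", page);
	if (read_bytes >= SE_DEV_ALIAS_LEN)
		read_bytes = SE_DEV_ALIAS_LEN - 1;	/* clamp to stored length */
	if (read_bytes && se_dev->se_dev_alias[read_bytes - 1] == '\n')
		se_dev->se_dev_alias[read_bytes - 1] = '\0';
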
@@ -1728,7 +1729,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
1728 1729
1729static ssize_t target_core_show_dev_udev_path(void *p, char *page) 1730static ssize_t target_core_show_dev_udev_path(void *p, char *page)
1730{ 1731{
1731 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1732 struct se_subsystem_dev *se_dev = p;
1732 1733
1733 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) 1734 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
1734 return 0; 1735 return 0;
@@ -1741,7 +1742,7 @@ static ssize_t target_core_store_dev_udev_path(
1741 const char *page, 1742 const char *page,
1742 size_t count) 1743 size_t count)
1743{ 1744{
1744 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1745 struct se_subsystem_dev *se_dev = p;
1745 struct se_hba *hba = se_dev->se_dev_hba; 1746 struct se_hba *hba = se_dev->se_dev_hba;
1746 ssize_t read_bytes; 1747 ssize_t read_bytes;
1747 1748
@@ -1756,6 +1757,9 @@ static ssize_t target_core_store_dev_udev_path(
1756 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1757 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
1757 "%s", page); 1758 "%s", page);
1758 1759
1760 if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
1761 se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
1762
1759 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1763 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1760 config_item_name(&hba->hba_group.cg_item), 1764 config_item_name(&hba->hba_group.cg_item),
1761 config_item_name(&se_dev->se_dev_group.cg_item), 1765 config_item_name(&se_dev->se_dev_group.cg_item),
@@ -1777,7 +1781,7 @@ static ssize_t target_core_store_dev_enable(
1777 const char *page, 1781 const char *page,
1778 size_t count) 1782 size_t count)
1779{ 1783{
1780 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1784 struct se_subsystem_dev *se_dev = p;
1781 struct se_device *dev; 1785 struct se_device *dev;
1782 struct se_hba *hba = se_dev->se_dev_hba; 1786 struct se_hba *hba = se_dev->se_dev_hba;
1783 struct se_subsystem_api *t = hba->transport; 1787 struct se_subsystem_api *t = hba->transport;
@@ -1822,7 +1826,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1822static ssize_t target_core_show_alua_lu_gp(void *p, char *page) 1826static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1823{ 1827{
1824 struct se_device *dev; 1828 struct se_device *dev;
1825 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; 1829 struct se_subsystem_dev *su_dev = p;
1826 struct config_item *lu_ci; 1830 struct config_item *lu_ci;
1827 struct t10_alua_lu_gp *lu_gp; 1831 struct t10_alua_lu_gp *lu_gp;
1828 struct t10_alua_lu_gp_member *lu_gp_mem; 1832 struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1860,7 +1864,7 @@ static ssize_t target_core_store_alua_lu_gp(
1860 size_t count) 1864 size_t count)
1861{ 1865{
1862 struct se_device *dev; 1866 struct se_device *dev;
1863 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; 1867 struct se_subsystem_dev *su_dev = p;
1864 struct se_hba *hba = su_dev->se_dev_hba; 1868 struct se_hba *hba = su_dev->se_dev_hba;
1865 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; 1869 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1866 struct t10_alua_lu_gp_member *lu_gp_mem; 1870 struct t10_alua_lu_gp_member *lu_gp_mem;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 9b863942547..0c5992f0d94 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -42,13 +42,11 @@
42#include <scsi/scsi_device.h> 42#include <scsi/scsi_device.h>
43 43
44#include <target/target_core_base.h> 44#include <target/target_core_base.h>
45#include <target/target_core_device.h> 45#include <target/target_core_backend.h>
46#include <target/target_core_tpg.h> 46#include <target/target_core_fabric.h>
47#include <target/target_core_transport.h>
48#include <target/target_core_fabric_ops.h>
49 47
48#include "target_core_internal.h"
50#include "target_core_alua.h" 49#include "target_core_alua.h"
51#include "target_core_hba.h"
52#include "target_core_pr.h" 50#include "target_core_pr.h"
53#include "target_core_ua.h" 51#include "target_core_ua.h"
54 52
@@ -1134,8 +1132,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1134 */ 1132 */
1135int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) 1133int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1136{ 1134{
1137 u32 orig_queue_depth = dev->queue_depth;
1138
1139 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1135 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1140 pr_err("dev[%p]: Unable to change SE Device TCQ while" 1136 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1141 " dev_export_obj: %d count exists\n", dev, 1137 " dev_export_obj: %d count exists\n", dev,
@@ -1169,11 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1169 } 1165 }
1170 1166
1171 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1167 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1172 if (queue_depth > orig_queue_depth)
1173 atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
1174 else if (queue_depth < orig_queue_depth)
1175 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
1176
1177 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1168 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1178 dev, queue_depth); 1169 dev, queue_depth);
1179 return 0; 1170 return 0;
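
The deleted lines here kept the atomic dev->depth_left counter in step whenever the queue depth was retuned; with them gone, se_dev_set_queue_depth() only updates dev->queue_depth itself, which suggests (though this excerpt does not show it) that the depth_left accounting is being removed wholesale elsewhere in the commit.
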
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 09b6f8729f9..4f77cce2264 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -36,18 +36,14 @@
36#include <linux/configfs.h> 36#include <linux/configfs.h>
37 37
38#include <target/target_core_base.h> 38#include <target/target_core_base.h>
39#include <target/target_core_device.h> 39#include <target/target_core_fabric.h>
40#include <target/target_core_tpg.h>
41#include <target/target_core_transport.h>
42#include <target/target_core_fabric_ops.h>
43#include <target/target_core_fabric_configfs.h> 40#include <target/target_core_fabric_configfs.h>
44#include <target/target_core_configfs.h> 41#include <target/target_core_configfs.h>
45#include <target/configfs_macros.h> 42#include <target/configfs_macros.h>
46 43
44#include "target_core_internal.h"
47#include "target_core_alua.h" 45#include "target_core_alua.h"
48#include "target_core_hba.h"
49#include "target_core_pr.h" 46#include "target_core_pr.h"
50#include "target_core_stat.h"
51 47
52#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ 48#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
53static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ 49static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index ec4249be617..283a36e464e 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,13 +34,10 @@
34#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
35 35
36#include <target/target_core_base.h> 36#include <target/target_core_base.h>
37#include <target/target_core_device.h> 37#include <target/target_core_fabric.h>
38#include <target/target_core_transport.h>
39#include <target/target_core_fabric_lib.h>
40#include <target/target_core_fabric_ops.h>
41#include <target/target_core_configfs.h> 38#include <target/target_core_configfs.h>
42 39
43#include "target_core_hba.h" 40#include "target_core_internal.h"
44#include "target_core_pr.h" 41#include "target_core_pr.h"
45 42
46/* 43/*
@@ -402,7 +399,7 @@ char *iscsi_parse_pr_out_transport_id(
402 add_len = ((buf[2] >> 8) & 0xff); 399 add_len = ((buf[2] >> 8) & 0xff);
403 add_len |= (buf[3] & 0xff); 400 add_len |= (buf[3] & 0xff);
404 401
405 tid_len = strlen((char *)&buf[4]); 402 tid_len = strlen(&buf[4]);
406 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ 403 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
407 tid_len += 1; /* Add one byte for NULL terminator */ 404 tid_len += 1; /* Add one byte for NULL terminator */
408 padding = ((-tid_len) & 3); 405 padding = ((-tid_len) & 3);
@@ -423,11 +420,11 @@ char *iscsi_parse_pr_out_transport_id(
423 * format. 420 * format.
424 */ 421 */
425 if (format_code == 0x40) { 422 if (format_code == 0x40) {
426 p = strstr((char *)&buf[4], ",i,0x"); 423 p = strstr(&buf[4], ",i,0x");
427 if (!p) { 424 if (!p) {
428 pr_err("Unable to locate \",i,0x\" seperator" 425 pr_err("Unable to locate \",i,0x\" seperator"
429 " for Initiator port identifier: %s\n", 426 " for Initiator port identifier: %s\n",
430 (char *)&buf[4]); 427 &buf[4]);
431 return NULL; 428 return NULL;
432 } 429 }
433 *p = '\0'; /* Terminate iSCSI Name */ 430 *p = '\0'; /* Terminate iSCSI Name */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b4864fba4ef..7ed58e2df79 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,8 +37,7 @@
37#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
38 38
39#include <target/target_core_base.h> 39#include <target/target_core_base.h>
40#include <target/target_core_device.h> 40#include <target/target_core_backend.h>
41#include <target/target_core_transport.h>
42 41
43#include "target_core_file.h" 42#include "target_core_file.h"
44 43
@@ -86,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
86static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) 85static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
87{ 86{
88 struct fd_dev *fd_dev; 87 struct fd_dev *fd_dev;
89 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; 88 struct fd_host *fd_host = hba->hba_ptr;
90 89
91 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); 90 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
92 if (!fd_dev) { 91 if (!fd_dev) {
@@ -114,8 +113,8 @@ static struct se_device *fd_create_virtdevice(
114 struct se_device *dev; 113 struct se_device *dev;
115 struct se_dev_limits dev_limits; 114 struct se_dev_limits dev_limits;
116 struct queue_limits *limits; 115 struct queue_limits *limits;
117 struct fd_dev *fd_dev = (struct fd_dev *) p; 116 struct fd_dev *fd_dev = p;
118 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; 117 struct fd_host *fd_host = hba->hba_ptr;
119 mm_segment_t old_fs; 118 mm_segment_t old_fs;
120 struct file *file; 119 struct file *file;
121 struct inode *inode = NULL; 120 struct inode *inode = NULL;
@@ -240,7 +239,7 @@ fail:
240 */ 239 */
241static void fd_free_device(void *p) 240static void fd_free_device(void *p)
242{ 241{
243 struct fd_dev *fd_dev = (struct fd_dev *) p; 242 struct fd_dev *fd_dev = p;
244 243
245 if (fd_dev->fd_file) { 244 if (fd_dev->fd_file) {
246 filp_close(fd_dev->fd_file, NULL); 245 filp_close(fd_dev->fd_file, NULL);
@@ -498,7 +497,7 @@ static ssize_t fd_set_configfs_dev_params(
498 497
499 orig = opts; 498 orig = opts;
500 499
501 while ((ptr = strsep(&opts, ",")) != NULL) { 500 while ((ptr = strsep(&opts, ",\n")) != NULL) {
502 if (!*ptr) 501 if (!*ptr)
503 continue; 502 continue;
504 503
@@ -559,7 +558,7 @@ out:
559 558
560static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 559static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
561{ 560{
562 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; 561 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
563 562
564 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 563 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
565 pr_err("Missing fd_dev_name=\n"); 564 pr_err("Missing fd_dev_name=\n");
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index c68019d6c40..3dd1bd4b6f7 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -37,11 +37,10 @@
37#include <net/tcp.h> 37#include <net/tcp.h>
38 38
39#include <target/target_core_base.h> 39#include <target/target_core_base.h>
40#include <target/target_core_device.h> 40#include <target/target_core_backend.h>
41#include <target/target_core_tpg.h> 41#include <target/target_core_fabric.h>
42#include <target/target_core_transport.h>
43 42
44#include "target_core_hba.h" 43#include "target_core_internal.h"
45 44
46static LIST_HEAD(subsystem_list); 45static LIST_HEAD(subsystem_list);
47static DEFINE_MUTEX(subsystem_mutex); 46static DEFINE_MUTEX(subsystem_mutex);
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
deleted file mode 100644
index bb0fea5f730..00000000000
--- a/drivers/target/target_core_hba.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef TARGET_CORE_HBA_H
2#define TARGET_CORE_HBA_H
3
4extern struct se_hba *core_alloc_hba(const char *, u32, u32);
5extern int core_delete_hba(struct se_hba *);
6
7#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 4aa99220443..cc8e6b58ef2 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -42,8 +42,7 @@
42#include <scsi/scsi_host.h> 42#include <scsi/scsi_host.h>
43 43
44#include <target/target_core_base.h> 44#include <target/target_core_base.h>
45#include <target/target_core_device.h> 45#include <target/target_core_backend.h>
46#include <target/target_core_transport.h>
47 46
48#include "target_core_iblock.h" 47#include "target_core_iblock.h"
49 48
@@ -391,7 +390,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
391 390
392 orig = opts; 391 orig = opts;
393 392
394 while ((ptr = strsep(&opts, ",")) != NULL) { 393 while ((ptr = strsep(&opts, ",\n")) != NULL) {
395 if (!*ptr) 394 if (!*ptr)
396 continue; 395 continue;
397 396
@@ -465,7 +464,7 @@ static ssize_t iblock_show_configfs_dev_params(
465 if (bd) { 464 if (bd) {
466 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", 465 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
467 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? 466 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
468 "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? 467 "" : (bd->bd_holder == ibd) ?
469 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 468 "CLAIMED: IBLOCK" : "CLAIMED: OS");
470 } else { 469 } else {
471 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 470 bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
new file mode 100644
index 00000000000..26f135e94f6
--- /dev/null
+++ b/drivers/target/target_core_internal.h
@@ -0,0 +1,123 @@
1#ifndef TARGET_CORE_INTERNAL_H
2#define TARGET_CORE_INTERNAL_H
3
4/* target_core_alua.c */
5extern struct t10_alua_lu_gp *default_lu_gp;
6
7/* target_core_cdb.c */
8int target_emulate_inquiry(struct se_task *task);
9int target_emulate_readcapacity(struct se_task *task);
10int target_emulate_readcapacity_16(struct se_task *task);
11int target_emulate_modesense(struct se_task *task);
12int target_emulate_request_sense(struct se_task *task);
13int target_emulate_unmap(struct se_task *task);
14int target_emulate_write_same(struct se_task *task);
15int target_emulate_synchronize_cache(struct se_task *task);
16int target_emulate_noop(struct se_task *task);
17
18/* target_core_device.c */
19struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
20int core_free_device_list_for_node(struct se_node_acl *,
21 struct se_portal_group *);
22void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
23void core_update_device_list_access(u32, u32, struct se_node_acl *);
24int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
25 u32, u32, struct se_node_acl *, struct se_portal_group *, int);
26void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
27int core_dev_export(struct se_device *, struct se_portal_group *,
28 struct se_lun *);
29void core_dev_unexport(struct se_device *, struct se_portal_group *,
30 struct se_lun *);
31int target_report_luns(struct se_task *);
32void se_release_device_for_hba(struct se_device *);
33void se_release_vpd_for_dev(struct se_device *);
34int se_free_virtual_device(struct se_device *, struct se_hba *);
35int se_dev_check_online(struct se_device *);
36int se_dev_check_shutdown(struct se_device *);
37void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
38int se_dev_set_task_timeout(struct se_device *, u32);
39int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
40int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
41int se_dev_set_unmap_granularity(struct se_device *, u32);
42int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
43int se_dev_set_emulate_dpo(struct se_device *, int);
44int se_dev_set_emulate_fua_write(struct se_device *, int);
45int se_dev_set_emulate_fua_read(struct se_device *, int);
46int se_dev_set_emulate_write_cache(struct se_device *, int);
47int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
48int se_dev_set_emulate_tas(struct se_device *, int);
49int se_dev_set_emulate_tpu(struct se_device *, int);
50int se_dev_set_emulate_tpws(struct se_device *, int);
51int se_dev_set_enforce_pr_isids(struct se_device *, int);
52int se_dev_set_is_nonrot(struct se_device *, int);
53int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
54int se_dev_set_queue_depth(struct se_device *, u32);
55int se_dev_set_max_sectors(struct se_device *, u32);
56int se_dev_set_optimal_sectors(struct se_device *, u32);
57int se_dev_set_block_size(struct se_device *, u32);
58struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
59 struct se_device *, u32);
60int core_dev_del_lun(struct se_portal_group *, u32);
61struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
62struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
63 u32, char *, int *);
64int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
65 struct se_lun_acl *, u32, u32);
66int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
67 struct se_lun *, struct se_lun_acl *);
68void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
69 struct se_lun_acl *lacl);
70int core_dev_setup_virtual_lun0(void);
71void core_dev_release_virtual_lun0(void);
72
73/* target_core_hba.c */
74struct se_hba *core_alloc_hba(const char *, u32, u32);
75int core_delete_hba(struct se_hba *);
76
77/* target_core_tmr.c */
78int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
79 struct list_head *, struct se_cmd *);
80
81/* target_core_tpg.c */
82extern struct se_device *g_lun0_dev;
83
84struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
85 const char *);
86struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
87 unsigned char *);
88void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
89void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
90struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
91int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
92 u32, void *);
93struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
94int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
95
96/* target_core_transport.c */
97extern struct kmem_cache *se_tmr_req_cache;
98
99int init_se_kmem_caches(void);
100void release_se_kmem_caches(void);
101u32 scsi_get_new_index(scsi_index_t);
102void transport_subsystem_check_init(void);
103void transport_cmd_finish_abort(struct se_cmd *, int);
104void __transport_remove_task_from_execute_queue(struct se_task *,
105 struct se_device *);
106unsigned char *transport_dump_cmd_direction(struct se_cmd *);
107void transport_dump_dev_state(struct se_device *, char *, int *);
108void transport_dump_dev_info(struct se_device *, struct se_lun *,
109 unsigned long long, char *, int *);
110void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
111int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
112int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
113int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
114bool target_stop_task(struct se_task *task, unsigned long *flags);
115int transport_clear_lun_from_sessions(struct se_lun *);
116void transport_send_task_abort(struct se_cmd *);
117
118/* target_core_stat.c */
119void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
120void target_stat_setup_port_default_groups(struct se_lun *);
121void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
122
123#endif /* TARGET_CORE_INTERNAL_H */
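The new header collects the prototypes that previously lived in per-file headers (target_core_hba.h and target_core_stat.h are deleted by this patch), so each core file carries one private include next to the public target/ headers. The resulting include block in a typical core file, as seen in the stat/tmr/transport hunks below, looks like:

    #include <target/target_core_base.h>
    #include <target/target_core_backend.h>
    #include <target/target_core_fabric.h>
    #include <target/target_core_configfs.h>

    #include "target_core_internal.h"  /* was: target_core_hba.h, target_core_stat.h, ... */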
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 95dee7074ae..429ad729166 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -33,14 +33,11 @@
33#include <asm/unaligned.h> 33#include <asm/unaligned.h>
34 34
35#include <target/target_core_base.h> 35#include <target/target_core_base.h>
36#include <target/target_core_device.h> 36#include <target/target_core_backend.h>
37#include <target/target_core_tmr.h> 37#include <target/target_core_fabric.h>
38#include <target/target_core_tpg.h>
39#include <target/target_core_transport.h>
40#include <target/target_core_fabric_ops.h>
41#include <target/target_core_configfs.h> 38#include <target/target_core_configfs.h>
42 39
43#include "target_core_hba.h" 40#include "target_core_internal.h"
44#include "target_core_pr.h" 41#include "target_core_pr.h"
45#include "target_core_ua.h" 42#include "target_core_ua.h"
46 43
@@ -2984,21 +2981,6 @@ static void core_scsi3_release_preempt_and_abort(
2984 } 2981 }
2985} 2982}
2986 2983
2987int core_scsi3_check_cdb_abort_and_preempt(
2988 struct list_head *preempt_and_abort_list,
2989 struct se_cmd *cmd)
2990{
2991 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
2992
2993 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
2994 pr_reg_abort_list) {
2995 if (pr_reg->pr_res_key == cmd->pr_res_key)
2996 return 0;
2997 }
2998
2999 return 1;
3000}
3001
3002static int core_scsi3_pro_preempt( 2984static int core_scsi3_pro_preempt(
3003 struct se_cmd *cmd, 2985 struct se_cmd *cmd,
3004 int type, 2986 int type,
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index b97f6940dd0..7a233feb7e9 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -60,8 +60,6 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
60 struct se_node_acl *); 60 struct se_node_acl *);
61extern void core_scsi3_free_all_registrations(struct se_device *); 61extern void core_scsi3_free_all_registrations(struct se_device *);
62extern unsigned char *core_scsi3_pr_dump_type(int); 62extern unsigned char *core_scsi3_pr_dump_type(int);
63extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
64 struct se_cmd *);
65 63
66extern int target_scsi3_emulate_pr_in(struct se_task *task); 64extern int target_scsi3_emulate_pr_in(struct se_task *task);
67extern int target_scsi3_emulate_pr_out(struct se_task *task); 65extern int target_scsi3_emulate_pr_out(struct se_task *task);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 8b15e56b038..d35467d42e1 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,8 +44,7 @@
44#include <scsi/scsi_tcq.h> 44#include <scsi/scsi_tcq.h>
45 45
46#include <target/target_core_base.h> 46#include <target/target_core_base.h>
47#include <target/target_core_device.h> 47#include <target/target_core_backend.h>
48#include <target/target_core_transport.h>
49 48
50#include "target_core_pscsi.h" 49#include "target_core_pscsi.h"
51 50
@@ -105,7 +104,7 @@ static void pscsi_detach_hba(struct se_hba *hba)
105 104
106static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) 105static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
107{ 106{
108 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; 107 struct pscsi_hba_virt *phv = hba->hba_ptr;
109 struct Scsi_Host *sh = phv->phv_lld_host; 108 struct Scsi_Host *sh = phv->phv_lld_host;
110 /* 109 /*
111 * Release the struct Scsi_Host 110 * Release the struct Scsi_Host
@@ -351,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
351 * scsi_device_put() and the pdv->pdv_sd cleared. 350 * scsi_device_put() and the pdv->pdv_sd cleared.
352 */ 351 */
353 pdv->pdv_sd = sd; 352 pdv->pdv_sd = sd;
354
355 dev = transport_add_device_to_core_hba(hba, &pscsi_template, 353 dev = transport_add_device_to_core_hba(hba, &pscsi_template,
356 se_dev, dev_flags, pdv, 354 se_dev, dev_flags, pdv,
357 &dev_limits, NULL, NULL); 355 &dev_limits, NULL, NULL);
@@ -406,7 +404,7 @@ static struct se_device *pscsi_create_type_disk(
406 __releases(sh->host_lock) 404 __releases(sh->host_lock)
407{ 405{
408 struct se_device *dev; 406 struct se_device *dev;
409 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 407 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
410 struct Scsi_Host *sh = sd->host; 408 struct Scsi_Host *sh = sd->host;
411 struct block_device *bd; 409 struct block_device *bd;
412 u32 dev_flags = 0; 410 u32 dev_flags = 0;
@@ -454,7 +452,7 @@ static struct se_device *pscsi_create_type_rom(
454 __releases(sh->host_lock) 452 __releases(sh->host_lock)
455{ 453{
456 struct se_device *dev; 454 struct se_device *dev;
457 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 455 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
458 struct Scsi_Host *sh = sd->host; 456 struct Scsi_Host *sh = sd->host;
459 u32 dev_flags = 0; 457 u32 dev_flags = 0;
460 458
@@ -489,7 +487,7 @@ static struct se_device *pscsi_create_type_other(
489 __releases(sh->host_lock) 487 __releases(sh->host_lock)
490{ 488{
491 struct se_device *dev; 489 struct se_device *dev;
492 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 490 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
493 struct Scsi_Host *sh = sd->host; 491 struct Scsi_Host *sh = sd->host;
494 u32 dev_flags = 0; 492 u32 dev_flags = 0;
495 493
@@ -510,10 +508,10 @@ static struct se_device *pscsi_create_virtdevice(
510 struct se_subsystem_dev *se_dev, 508 struct se_subsystem_dev *se_dev,
511 void *p) 509 void *p)
512{ 510{
513 struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p; 511 struct pscsi_dev_virt *pdv = p;
514 struct se_device *dev; 512 struct se_device *dev;
515 struct scsi_device *sd; 513 struct scsi_device *sd;
516 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; 514 struct pscsi_hba_virt *phv = hba->hba_ptr;
517 struct Scsi_Host *sh = phv->phv_lld_host; 515 struct Scsi_Host *sh = phv->phv_lld_host;
518 int legacy_mode_enable = 0; 516 int legacy_mode_enable = 0;
519 517
@@ -818,7 +816,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
818 816
819 orig = opts; 817 orig = opts;
820 818
821 while ((ptr = strsep(&opts, ",")) != NULL) { 819 while ((ptr = strsep(&opts, ",\n")) != NULL) {
822 if (!*ptr) 820 if (!*ptr)
823 continue; 821 continue;
824 822
@@ -1144,7 +1142,7 @@ static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
1144{ 1142{
1145 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1143 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1146 1144
1147 return (unsigned char *)&pt->pscsi_sense[0]; 1145 return pt->pscsi_sense;
1148} 1146}
1149 1147
1150/* pscsi_get_device_rev(): 1148/* pscsi_get_device_rev():
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 02e51faa2f4..8b68f7b8263 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -37,9 +37,7 @@
37#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
38 38
39#include <target/target_core_base.h> 39#include <target/target_core_base.h>
40#include <target/target_core_device.h> 40#include <target/target_core_backend.h>
41#include <target/target_core_transport.h>
42#include <target/target_core_fabric_ops.h>
43 41
44#include "target_core_rd.h" 42#include "target_core_rd.h"
45 43
@@ -474,7 +472,7 @@ static ssize_t rd_set_configfs_dev_params(
474 472
475 orig = opts; 473 orig = opts;
476 474
477 while ((ptr = strsep(&opts, ",")) != NULL) { 475 while ((ptr = strsep(&opts, ",\n")) != NULL) {
478 if (!*ptr) 476 if (!*ptr)
479 continue; 477 continue;
480 478
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 874152aed94..f8c2d2cc343 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -43,12 +43,12 @@
43#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
44 44
45#include <target/target_core_base.h> 45#include <target/target_core_base.h>
46#include <target/target_core_transport.h> 46#include <target/target_core_backend.h>
47#include <target/target_core_fabric_ops.h> 47#include <target/target_core_fabric.h>
48#include <target/target_core_configfs.h> 48#include <target/target_core_configfs.h>
49#include <target/configfs_macros.h> 49#include <target/configfs_macros.h>
50 50
51#include "target_core_hba.h" 51#include "target_core_internal.h"
52 52
53#ifndef INITIAL_JIFFIES 53#ifndef INITIAL_JIFFIES
54#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) 54#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -1755,8 +1755,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
1755 /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ 1755 /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
1756 memset(buf, 0, 64); 1756 memset(buf, 0, 64);
1757 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) 1757 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
1758 tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 1758 tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
1759 (unsigned char *)&buf[0], 64);
1760 1759
1761 ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); 1760 ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
1762 spin_unlock_irq(&nacl->nacl_sess_lock); 1761 spin_unlock_irq(&nacl->nacl_sess_lock);
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
deleted file mode 100644
index 86c252f9ea4..00000000000
--- a/drivers/target/target_core_stat.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef TARGET_CORE_STAT_H
2#define TARGET_CORE_STAT_H
3
4extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
5extern void target_stat_setup_port_default_groups(struct se_lun *);
6extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
7
8#endif /*** TARGET_CORE_STAT_H ***/
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 684522805a1..dcb0618c938 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -32,12 +32,11 @@
32#include <scsi/scsi_cmnd.h> 32#include <scsi/scsi_cmnd.h>
33 33
34#include <target/target_core_base.h> 34#include <target/target_core_base.h>
35#include <target/target_core_device.h> 35#include <target/target_core_backend.h>
36#include <target/target_core_tmr.h> 36#include <target/target_core_fabric.h>
37#include <target/target_core_transport.h>
38#include <target/target_core_fabric_ops.h>
39#include <target/target_core_configfs.h> 37#include <target/target_core_configfs.h>
40 38
39#include "target_core_internal.h"
41#include "target_core_alua.h" 40#include "target_core_alua.h"
42#include "target_core_pr.h" 41#include "target_core_pr.h"
43 42
@@ -101,6 +100,21 @@ static void core_tmr_handle_tas_abort(
101 transport_cmd_finish_abort(cmd, 0); 100 transport_cmd_finish_abort(cmd, 0);
102} 101}
103 102
103static int target_check_cdb_and_preempt(struct list_head *list,
104 struct se_cmd *cmd)
105{
106 struct t10_pr_registration *reg;
107
108 if (!list)
109 return 0;
110 list_for_each_entry(reg, list, pr_reg_abort_list) {
111 if (reg->pr_res_key == cmd->pr_res_key)
112 return 0;
113 }
114
115 return 1;
116}
117
104static void core_tmr_drain_tmr_list( 118static void core_tmr_drain_tmr_list(
105 struct se_device *dev, 119 struct se_device *dev,
106 struct se_tmr_req *tmr, 120 struct se_tmr_req *tmr,
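Because the new helper itself returns 0 when the list pointer is NULL, each drain loop's three-line guard collapses to a single call. The call-site transformation, repeated three times in the hunks below, is:

    /* before: every caller guarded against a NULL list itself */
    if (preempt_and_abort_list &&
        (core_scsi3_check_cdb_abort_and_preempt(
                    preempt_and_abort_list, cmd) != 0))
            continue;

    /* after: the NULL check lives inside the helper */
    if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
            continue;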
@@ -132,9 +146,7 @@ static void core_tmr_drain_tmr_list(
132 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action 146 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
133 * skip non registration key matching TMRs. 147 * skip non registration key matching TMRs.
134 */ 148 */
135 if (preempt_and_abort_list && 149 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
136 (core_scsi3_check_cdb_abort_and_preempt(
137 preempt_and_abort_list, cmd) != 0))
138 continue; 150 continue;
139 151
140 spin_lock(&cmd->t_state_lock); 152 spin_lock(&cmd->t_state_lock);
@@ -211,9 +223,7 @@ static void core_tmr_drain_task_list(
211 * For PREEMPT_AND_ABORT usage, only process commands 223 * For PREEMPT_AND_ABORT usage, only process commands
212 * with a matching reservation key. 224 * with a matching reservation key.
213 */ 225 */
214 if (preempt_and_abort_list && 226 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
215 (core_scsi3_check_cdb_abort_and_preempt(
216 preempt_and_abort_list, cmd) != 0))
217 continue; 227 continue;
218 /* 228 /*
219 * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 229 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
@@ -222,7 +232,7 @@ static void core_tmr_drain_task_list(
222 continue; 232 continue;
223 233
224 list_move_tail(&task->t_state_list, &drain_task_list); 234 list_move_tail(&task->t_state_list, &drain_task_list);
225 atomic_set(&task->task_state_active, 0); 235 task->t_state_active = false;
226 /* 236 /*
227 * Remove from task execute list before processing drain_task_list 237 * Remove from task execute list before processing drain_task_list
228 */ 238 */
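task->task_state_active changes from an atomic_t to a plain bool here and in the transport hunks below; that is safe because every reader and writer of the flag already holds dev->execute_task_lock, so the atomic provided no extra guarantee. The pattern, as used throughout this patch:

    spin_lock(&dev->execute_task_lock);
    if (task->t_state_active) {
            list_del(&task->t_state_list);
            task->t_state_active = false;   /* plain store; the spinlock orders it */
    }
    spin_unlock(&dev->execute_task_lock);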
@@ -321,9 +331,7 @@ static void core_tmr_drain_cmd_list(
321 * For PREEMPT_AND_ABORT usage, only process commands 331 * For PREEMPT_AND_ABORT usage, only process commands
322 * with a matching reservation key. 332 * with a matching reservation key.
323 */ 333 */
324 if (preempt_and_abort_list && 334 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
325 (core_scsi3_check_cdb_abort_and_preempt(
326 preempt_and_abort_list, cmd) != 0))
327 continue; 335 continue;
328 /* 336 /*
329 * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 337 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 8ddd133025b..b7668029bb3 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -39,13 +39,10 @@
39#include <scsi/scsi_cmnd.h> 39#include <scsi/scsi_cmnd.h>
40 40
41#include <target/target_core_base.h> 41#include <target/target_core_base.h>
42#include <target/target_core_device.h> 42#include <target/target_core_backend.h>
43#include <target/target_core_tpg.h> 43#include <target/target_core_fabric.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46 44
47#include "target_core_hba.h" 45#include "target_core_internal.h"
48#include "target_core_stat.h"
49 46
50extern struct se_device *g_lun0_dev; 47extern struct se_device *g_lun0_dev;
51 48
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0257658e2e3..d3ddd136194 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -45,16 +45,12 @@
45#include <scsi/scsi_tcq.h> 45#include <scsi/scsi_tcq.h>
46 46
47#include <target/target_core_base.h> 47#include <target/target_core_base.h>
48#include <target/target_core_device.h> 48#include <target/target_core_backend.h>
49#include <target/target_core_tmr.h> 49#include <target/target_core_fabric.h>
50#include <target/target_core_tpg.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_configfs.h> 50#include <target/target_core_configfs.h>
54 51
52#include "target_core_internal.h"
55#include "target_core_alua.h" 53#include "target_core_alua.h"
56#include "target_core_cdb.h"
57#include "target_core_hba.h"
58#include "target_core_pr.h" 54#include "target_core_pr.h"
59#include "target_core_ua.h" 55#include "target_core_ua.h"
60 56
@@ -72,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
72 68
73static int transport_generic_write_pending(struct se_cmd *); 69static int transport_generic_write_pending(struct se_cmd *);
74static int transport_processing_thread(void *param); 70static int transport_processing_thread(void *param);
75static int __transport_execute_tasks(struct se_device *dev); 71static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
76static void transport_complete_task_attr(struct se_cmd *cmd); 72static void transport_complete_task_attr(struct se_cmd *cmd);
77static void transport_handle_queue_full(struct se_cmd *cmd, 73static void transport_handle_queue_full(struct se_cmd *cmd,
78 struct se_device *dev); 74 struct se_device *dev);
@@ -212,14 +208,13 @@ u32 scsi_get_new_index(scsi_index_t type)
212 return new_index; 208 return new_index;
213} 209}
214 210
215void transport_init_queue_obj(struct se_queue_obj *qobj) 211static void transport_init_queue_obj(struct se_queue_obj *qobj)
216{ 212{
217 atomic_set(&qobj->queue_cnt, 0); 213 atomic_set(&qobj->queue_cnt, 0);
218 INIT_LIST_HEAD(&qobj->qobj_list); 214 INIT_LIST_HEAD(&qobj->qobj_list);
219 init_waitqueue_head(&qobj->thread_wq); 215 init_waitqueue_head(&qobj->thread_wq);
220 spin_lock_init(&qobj->cmd_queue_lock); 216 spin_lock_init(&qobj->cmd_queue_lock);
221} 217}
222EXPORT_SYMBOL(transport_init_queue_obj);
223 218
224void transport_subsystem_check_init(void) 219void transport_subsystem_check_init(void)
225{ 220{
@@ -426,18 +421,18 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
426 if (task->task_flags & TF_ACTIVE) 421 if (task->task_flags & TF_ACTIVE)
427 continue; 422 continue;
428 423
429 if (!atomic_read(&task->task_state_active))
430 continue;
431
432 spin_lock_irqsave(&dev->execute_task_lock, flags); 424 spin_lock_irqsave(&dev->execute_task_lock, flags);
433 list_del(&task->t_state_list); 425 if (task->t_state_active) {
434 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", 426 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
435 cmd->se_tfo->get_task_tag(cmd), dev, task); 427 cmd->se_tfo->get_task_tag(cmd), dev, task);
436 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
437 428
438 atomic_set(&task->task_state_active, 0); 429 list_del(&task->t_state_list);
439 atomic_dec(&cmd->t_task_cdbs_ex_left); 430 atomic_dec(&cmd->t_task_cdbs_ex_left);
431 task->t_state_active = false;
432 }
433 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
440 } 434 }
435
441} 436}
442 437
443/* transport_cmd_check_stop(): 438/* transport_cmd_check_stop():
@@ -696,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
696 struct se_cmd *cmd = task->task_se_cmd; 691 struct se_cmd *cmd = task->task_se_cmd;
697 struct se_device *dev = cmd->se_dev; 692 struct se_device *dev = cmd->se_dev;
698 unsigned long flags; 693 unsigned long flags;
699#if 0
700 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
701 cmd->t_task_cdb[0], dev);
702#endif
703 if (dev)
704 atomic_inc(&dev->depth_left);
705 694
706 spin_lock_irqsave(&cmd->t_state_lock, flags); 695 spin_lock_irqsave(&cmd->t_state_lock, flags);
707 task->task_flags &= ~TF_ACTIVE; 696 task->task_flags &= ~TF_ACTIVE;
@@ -714,7 +703,7 @@ void transport_complete_task(struct se_task *task, int success)
714 if (dev && dev->transport->transport_complete) { 703 if (dev && dev->transport->transport_complete) {
715 if (dev->transport->transport_complete(task) != 0) { 704 if (dev->transport->transport_complete(task) != 0) {
716 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 705 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
717 task->task_sense = 1; 706 task->task_flags |= TF_HAS_SENSE;
718 success = 1; 707 success = 1;
719 } 708 }
720 } 709 }
@@ -743,13 +732,7 @@ void transport_complete_task(struct se_task *task, int success)
743 } 732 }
744 733
745 if (cmd->t_tasks_failed) { 734 if (cmd->t_tasks_failed) {
746 if (!task->task_error_status) { 735 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
747 task->task_error_status =
748 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
749 cmd->scsi_sense_reason =
750 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
751 }
752
753 INIT_WORK(&cmd->work, target_complete_failure_work); 736 INIT_WORK(&cmd->work, target_complete_failure_work);
754 } else { 737 } else {
755 atomic_set(&cmd->t_transport_complete, 1); 738 atomic_set(&cmd->t_transport_complete, 1);
@@ -824,7 +807,7 @@ static void __transport_add_task_to_execute_queue(
824 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); 807 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
825 atomic_inc(&dev->execute_tasks); 808 atomic_inc(&dev->execute_tasks);
826 809
827 if (atomic_read(&task->task_state_active)) 810 if (task->t_state_active)
828 return; 811 return;
829 /* 812 /*
830 * Determine if this task needs to go to HEAD_OF_QUEUE for the 813 * Determine if this task needs to go to HEAD_OF_QUEUE for the
@@ -838,7 +821,7 @@ static void __transport_add_task_to_execute_queue(
838 else 821 else
839 list_add_tail(&task->t_state_list, &dev->state_task_list); 822 list_add_tail(&task->t_state_list, &dev->state_task_list);
840 823
841 atomic_set(&task->task_state_active, 1); 824 task->t_state_active = true;
842 825
843 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 826 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
844 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), 827 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
@@ -853,29 +836,26 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
853 836
854 spin_lock_irqsave(&cmd->t_state_lock, flags); 837 spin_lock_irqsave(&cmd->t_state_lock, flags);
855 list_for_each_entry(task, &cmd->t_task_list, t_list) { 838 list_for_each_entry(task, &cmd->t_task_list, t_list) {
856 if (atomic_read(&task->task_state_active))
857 continue;
858
859 spin_lock(&dev->execute_task_lock); 839 spin_lock(&dev->execute_task_lock);
860 list_add_tail(&task->t_state_list, &dev->state_task_list); 840 if (!task->t_state_active) {
861 atomic_set(&task->task_state_active, 1); 841 list_add_tail(&task->t_state_list,
862 842 &dev->state_task_list);
863 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 843 task->t_state_active = true;
864 task->task_se_cmd->se_tfo->get_task_tag( 844
865 task->task_se_cmd), task, dev); 845 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
866 846 task->task_se_cmd->se_tfo->get_task_tag(
847 task->task_se_cmd), task, dev);
848 }
867 spin_unlock(&dev->execute_task_lock); 849 spin_unlock(&dev->execute_task_lock);
868 } 850 }
869 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 851 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
870} 852}
871 853
872static void transport_add_tasks_from_cmd(struct se_cmd *cmd) 854static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
873{ 855{
874 struct se_device *dev = cmd->se_dev; 856 struct se_device *dev = cmd->se_dev;
875 struct se_task *task, *task_prev = NULL; 857 struct se_task *task, *task_prev = NULL;
876 unsigned long flags;
877 858
878 spin_lock_irqsave(&dev->execute_task_lock, flags);
879 list_for_each_entry(task, &cmd->t_task_list, t_list) { 859 list_for_each_entry(task, &cmd->t_task_list, t_list) {
880 if (!list_empty(&task->t_execute_list)) 860 if (!list_empty(&task->t_execute_list))
881 continue; 861 continue;
@@ -886,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
886 __transport_add_task_to_execute_queue(task, task_prev, dev); 866 __transport_add_task_to_execute_queue(task, task_prev, dev);
887 task_prev = task; 867 task_prev = task;
888 } 868 }
869}
870
871static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
872{
873 unsigned long flags;
874 struct se_device *dev = cmd->se_dev;
875
876 spin_lock_irqsave(&dev->execute_task_lock, flags);
877 __transport_add_tasks_from_cmd(cmd);
889 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 878 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
890} 879}
891 880
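The split follows the kernel's double-underscore convention: __transport_add_tasks_from_cmd() requires dev->execute_task_lock to be held, while the plain-named wrapper supplies the locking. This is what lets __transport_execute_tasks() further down add a new command's tasks inside its own critical section instead of taking the lock twice. In outline:

    /* caller holds dev->execute_task_lock */
    static void __transport_add_tasks_from_cmd(struct se_cmd *cmd);

    static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
    {
            unsigned long flags;
            struct se_device *dev = cmd->se_dev;

            spin_lock_irqsave(&dev->execute_task_lock, flags);
            __transport_add_tasks_from_cmd(cmd);
            spin_unlock_irqrestore(&dev->execute_task_lock, flags);
    }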
@@ -896,7 +885,7 @@ void __transport_remove_task_from_execute_queue(struct se_task *task,
896 atomic_dec(&dev->execute_tasks); 885 atomic_dec(&dev->execute_tasks);
897} 886}
898 887
899void transport_remove_task_from_execute_queue( 888static void transport_remove_task_from_execute_queue(
900 struct se_task *task, 889 struct se_task *task,
901 struct se_device *dev) 890 struct se_device *dev)
902{ 891{
@@ -983,9 +972,8 @@ void transport_dump_dev_state(
983 break; 972 break;
984 } 973 }
985 974
986 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", 975 *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
987 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), 976 atomic_read(&dev->execute_tasks), dev->queue_depth);
988 dev->queue_depth);
989 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", 977 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
990 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); 978 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
991 *bl += sprintf(b + *bl, " "); 979 *bl += sprintf(b + *bl, " ");
@@ -1340,9 +1328,6 @@ struct se_device *transport_add_device_to_core_hba(
1340 spin_lock_init(&dev->se_port_lock); 1328 spin_lock_init(&dev->se_port_lock);
1341 spin_lock_init(&dev->se_tmr_lock); 1329 spin_lock_init(&dev->se_tmr_lock);
1342 spin_lock_init(&dev->qf_cmd_lock); 1330 spin_lock_init(&dev->qf_cmd_lock);
1343
1344 dev->queue_depth = dev_limits->queue_depth;
1345 atomic_set(&dev->depth_left, dev->queue_depth);
1346 atomic_set(&dev->dev_ordered_id, 0); 1331 atomic_set(&dev->dev_ordered_id, 0);
1347 1332
1348 se_dev_set_default_attribs(dev, dev_limits); 1333 se_dev_set_default_attribs(dev, dev_limits);
@@ -1654,6 +1639,80 @@ int transport_handle_cdb_direct(
1654} 1639}
1655EXPORT_SYMBOL(transport_handle_cdb_direct); 1640EXPORT_SYMBOL(transport_handle_cdb_direct);
1656 1641
1642/**
1643 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1644 *
1645 * @se_cmd: command descriptor to submit
1646 * @se_sess: associated se_sess for endpoint
1647 * @cdb: pointer to SCSI CDB
1648 * @sense: pointer to SCSI sense buffer
1649 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1650 * @data_length: fabric expected data transfer length
1651 * @task_attr: SAM task attribute
1652 * @data_dir: DMA data direction
1653 * @flags: flags for command submission from target_sc_flags_table
1654 *
1655 * This may only be called from process context, and also currently
1656 * assumes internal allocation of fabric payload buffer by target-core.
1657 **/
1658int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1659 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1660 u32 data_length, int task_attr, int data_dir, int flags)
1661{
1662 struct se_portal_group *se_tpg;
1663 int rc;
1664
1665 se_tpg = se_sess->se_tpg;
1666 BUG_ON(!se_tpg);
1667 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1668 BUG_ON(in_interrupt());
1669 /*
1670 * Initialize se_cmd for target operation. From this point
1671 * exceptions are handled by sending exception status via
1672 * target_core_fabric_ops->queue_status() callback
1673 */
1674 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1675 data_length, data_dir, task_attr, sense);
1676 /*
1677 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1678 * se_sess->sess_cmd_list. A second kref_get here is necessary
1679 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1680 * kref_put() to happen during fabric packet acknowledgement.
1681 */
1682 target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1683 /*
1684 * Signal bidirectional data payloads to target-core
1685 */
1686 if (flags & TARGET_SCF_BIDI_OP)
1687 se_cmd->se_cmd_flags |= SCF_BIDI;
1688 /*
1689 * Locate se_lun pointer and attach it to struct se_cmd
1690 */
1691 if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
1692 goto out_check_cond;
1693 /*
1694 * Sanitize CDBs via transport_generic_cmd_sequencer() and
1695 * allocate the necessary tasks to complete the received CDB+data
1696 */
1697 rc = transport_generic_allocate_tasks(se_cmd, cdb);
1698 if (rc != 0)
1699 goto out_check_cond;
1700 /*
1701 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1702 * for immediate execution of READs, otherwise wait for
1703 * transport_generic_handle_data() to be called for WRITEs
1704 * when fabric has filled the incoming buffer.
1705 */
1706 transport_handle_cdb_direct(se_cmd);
1707 return 0;
1708
1709out_check_cond:
1710 transport_send_check_condition_and_sense(se_cmd,
1711 se_cmd->scsi_sense_reason, 0);
1712 return 0;
1713}
1714EXPORT_SYMBOL(target_submit_cmd);
1715
1657/* 1716/*
1658 * Used by fabric module frontends defining a TFO->new_cmd_map() caller 1717 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1659 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to 1718 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
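A sketch of the intended fabric-side usage, modeled on the tcm_fc conversion at the end of this patch; cmd, sess, and the sense buffer field stand in for the fabric's own structures. Error handling relies on target_submit_cmd() queuing CHECK CONDITION status internally, which is why both of its paths return 0 in this version:

    /* process context, se_cmd not yet initialized */
    rc = target_submit_cmd(&cmd->se_cmd, sess->se_sess, cmd->cdb,
                           &cmd->sense_buffer[0], unpacked_lun,
                           data_len, task_attr, data_dir,
                           0 /* or TARGET_SCF_BIDI_OP | TARGET_SCF_ACK_KREF */);
    if (rc < 0)
            return;  /* exception status already sent via ->queue_status() */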
@@ -1920,18 +1979,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1920 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1979 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1921} 1980}
1922 1981
1923static inline int transport_tcq_window_closed(struct se_device *dev)
1924{
1925 if (dev->dev_tcq_window_closed++ <
1926 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
1927 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
1928 } else
1929 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
1930
1931 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
1932 return 0;
1933}
1934
1935/* 1982/*
1936 * Called from Fabric Module context from transport_execute_tasks() 1983 * Called from Fabric Module context from transport_execute_tasks()
1937 * 1984 *
@@ -2014,13 +2061,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2014static int transport_execute_tasks(struct se_cmd *cmd) 2061static int transport_execute_tasks(struct se_cmd *cmd)
2015{ 2062{
2016 int add_tasks; 2063 int add_tasks;
2017 2064 struct se_device *se_dev = cmd->se_dev;
2018 if (se_dev_check_online(cmd->se_dev) != 0) {
2019 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2020 transport_generic_request_failure(cmd);
2021 return 0;
2022 }
2023
2024 /* 2065 /*
2025 * Call transport_cmd_check_stop() to see if a fabric exception 2066 * Call transport_cmd_check_stop() to see if a fabric exception
2026 * has occurred that prevents execution. 2067 * has occurred that prevents execution.
@@ -2034,19 +2075,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
2034 if (!add_tasks) 2075 if (!add_tasks)
2035 goto execute_tasks; 2076 goto execute_tasks;
2036 /* 2077 /*
2037 * This calls transport_add_tasks_from_cmd() to handle 2078 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
2038 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation 2079 * adds associated se_tasks while holding dev->execute_task_lock
2039 * (if enabled) in __transport_add_task_to_execute_queue() and 2080 * before I/O dispatch to avoid a double spinlock access.
2040 * transport_add_task_check_sam_attr().
2041 */ 2081 */
2042 transport_add_tasks_from_cmd(cmd); 2082 __transport_execute_tasks(se_dev, cmd);
2083 return 0;
2043 } 2084 }
2044 /* 2085
2045 * Kick the execution queue for the cmd associated struct se_device
2046 * storage object.
2047 */
2048execute_tasks: 2086execute_tasks:
2049 __transport_execute_tasks(cmd->se_dev); 2087 __transport_execute_tasks(se_dev, NULL);
2050 return 0; 2088 return 0;
2051} 2089}
2052 2090
@@ -2056,24 +2094,18 @@ execute_tasks:
2056 * 2094 *
2057 * Called from transport_processing_thread() 2095 * Called from transport_processing_thread()
2058 */ 2096 */
2059static int __transport_execute_tasks(struct se_device *dev) 2097static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
2060{ 2098{
2061 int error; 2099 int error;
2062 struct se_cmd *cmd = NULL; 2100 struct se_cmd *cmd = NULL;
2063 struct se_task *task = NULL; 2101 struct se_task *task = NULL;
2064 unsigned long flags; 2102 unsigned long flags;
2065 2103
2066 /*
2067 * Check if there is enough room in the device and HBA queue to send
2068 * struct se_tasks to the selected transport.
2069 */
2070check_depth: 2104check_depth:
2071 if (!atomic_read(&dev->depth_left))
2072 return transport_tcq_window_closed(dev);
2073
2074 dev->dev_tcq_window_closed = 0;
2075
2076 spin_lock_irq(&dev->execute_task_lock); 2105 spin_lock_irq(&dev->execute_task_lock);
2106 if (new_cmd != NULL)
2107 __transport_add_tasks_from_cmd(new_cmd);
2108
2077 if (list_empty(&dev->execute_task_list)) { 2109 if (list_empty(&dev->execute_task_list)) {
2078 spin_unlock_irq(&dev->execute_task_lock); 2110 spin_unlock_irq(&dev->execute_task_lock);
2079 return 0; 2111 return 0;
@@ -2083,10 +2115,7 @@ check_depth:
2083 __transport_remove_task_from_execute_queue(task, dev); 2115 __transport_remove_task_from_execute_queue(task, dev);
2084 spin_unlock_irq(&dev->execute_task_lock); 2116 spin_unlock_irq(&dev->execute_task_lock);
2085 2117
2086 atomic_dec(&dev->depth_left);
2087
2088 cmd = task->task_se_cmd; 2118 cmd = task->task_se_cmd;
2089
2090 spin_lock_irqsave(&cmd->t_state_lock, flags); 2119 spin_lock_irqsave(&cmd->t_state_lock, flags);
2091 task->task_flags |= (TF_ACTIVE | TF_SENT); 2120 task->task_flags |= (TF_ACTIVE | TF_SENT);
2092 atomic_inc(&cmd->t_task_cdbs_sent); 2121 atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2107,10 +2136,10 @@ check_depth:
2107 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2136 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2108 atomic_set(&cmd->t_transport_sent, 0); 2137 atomic_set(&cmd->t_transport_sent, 0);
2109 transport_stop_tasks_for_cmd(cmd); 2138 transport_stop_tasks_for_cmd(cmd);
2110 atomic_inc(&dev->depth_left);
2111 transport_generic_request_failure(cmd); 2139 transport_generic_request_failure(cmd);
2112 } 2140 }
2113 2141
2142 new_cmd = NULL;
2114 goto check_depth; 2143 goto check_depth;
2115 2144
2116 return 0; 2145 return 0;
@@ -2351,7 +2380,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2351 2380
2352 list_for_each_entry_safe(task, task_tmp, 2381 list_for_each_entry_safe(task, task_tmp,
2353 &cmd->t_task_list, t_list) { 2382 &cmd->t_task_list, t_list) {
2354 if (!task->task_sense) 2383 if (!(task->task_flags & TF_HAS_SENSE))
2355 continue; 2384 continue;
2356 2385
2357 if (!dev->transport->get_sense_buffer) { 2386 if (!dev->transport->get_sense_buffer) {
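The standalone task->task_sense int becomes a TF_HAS_SENSE bit in the existing task->task_flags word, so producer and consumer now read:

    /* completion side: backend reported valid sense data */
    task->task_flags |= TF_HAS_SENSE;

    /* collection side: skip tasks that carry no sense */
    if (!(task->task_flags & TF_HAS_SENSE))
            continue;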
@@ -3346,6 +3375,32 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3346} 3375}
3347 3376
3348/** 3377/**
3378 * transport_release_cmd - free a command
3379 * @cmd: command to free
3380 *
3381 * This routine unconditionally frees a command, and reference counting
3382 * or list removal must be done in the caller.
3383 */
3384static void transport_release_cmd(struct se_cmd *cmd)
3385{
3386 BUG_ON(!cmd->se_tfo);
3387
3388 if (cmd->se_tmr_req)
3389 core_tmr_release_req(cmd->se_tmr_req);
3390 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3391 kfree(cmd->t_task_cdb);
3392 /*
3393 * If this cmd has been setup with target_get_sess_cmd(), drop
3394 * the kref and call ->release_cmd() in kref callback.
3395 */
3396 if (cmd->check_release != 0) {
3397 target_put_sess_cmd(cmd->se_sess, cmd);
3398 return;
3399 }
3400 cmd->se_tfo->release_cmd(cmd);
3401}
3402
3403/**
3349 * transport_put_cmd - release a reference to a command 3404 * transport_put_cmd - release a reference to a command
3350 * @cmd: command to release 3405 * @cmd: command to release
3351 * 3406 *
@@ -3870,33 +3925,6 @@ queue_full:
3870 return 0; 3925 return 0;
3871} 3926}
3872 3927
3873/**
3874 * transport_release_cmd - free a command
3875 * @cmd: command to free
3876 *
3877 * This routine unconditionally frees a command, and reference counting
3878 * or list removal must be done in the caller.
3879 */
3880void transport_release_cmd(struct se_cmd *cmd)
3881{
3882 BUG_ON(!cmd->se_tfo);
3883
3884 if (cmd->se_tmr_req)
3885 core_tmr_release_req(cmd->se_tmr_req);
3886 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3887 kfree(cmd->t_task_cdb);
3888 /*
3889 * Check if target_wait_for_sess_cmds() is expecting to
3890 * release se_cmd directly here..
3891 */
3892 if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
3893 if (cmd->se_tfo->check_release_cmd(cmd) != 0)
3894 return;
3895
3896 cmd->se_tfo->release_cmd(cmd);
3897}
3898EXPORT_SYMBOL(transport_release_cmd);
3899
3900void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 3928void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3901{ 3929{
3902 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 3930 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3923,11 +3951,22 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
3923/* target_get_sess_cmd - Add command to active ->sess_cmd_list 3951/* target_get_sess_cmd - Add command to active ->sess_cmd_list
3924 * @se_sess: session to reference 3952 * @se_sess: session to reference
3925 * @se_cmd: command descriptor to add 3953 * @se_cmd: command descriptor to add
3954 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
3926 */ 3955 */
3927void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 3956void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3957 bool ack_kref)
3928{ 3958{
3929 unsigned long flags; 3959 unsigned long flags;
3930 3960
3961 kref_init(&se_cmd->cmd_kref);
3962 /*
3963 * Add a second kref if the fabric caller is expecting to handle
3964 * fabric acknowledgement that requires two target_put_sess_cmd()
3965 * invocations before se_cmd descriptor release.
3966 */
3967 if (ack_kref == true)
3968 kref_get(&se_cmd->cmd_kref);
3969
3931 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 3970 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3932 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 3971 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3933 se_cmd->check_release = 1; 3972 se_cmd->check_release = 1;
@@ -3935,30 +3974,36 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3935} 3974}
3936EXPORT_SYMBOL(target_get_sess_cmd); 3975EXPORT_SYMBOL(target_get_sess_cmd);
3937 3976
3938/* target_put_sess_cmd - Check for active I/O shutdown or list delete 3977static void target_release_cmd_kref(struct kref *kref)
3939 * @se_sess: session to reference
3940 * @se_cmd: command descriptor to drop
3941 */
3942int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3943{ 3978{
3979 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
3980 struct se_session *se_sess = se_cmd->se_sess;
3944 unsigned long flags; 3981 unsigned long flags;
3945 3982
3946 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 3983 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3947 if (list_empty(&se_cmd->se_cmd_list)) { 3984 if (list_empty(&se_cmd->se_cmd_list)) {
3948 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3985 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3949 WARN_ON(1); 3986 WARN_ON(1);
3950 return 0; 3987 return;
3951 } 3988 }
3952
3953 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 3989 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3954 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3990 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3955 complete(&se_cmd->cmd_wait_comp); 3991 complete(&se_cmd->cmd_wait_comp);
3956 return 1; 3992 return;
3957 } 3993 }
3958 list_del(&se_cmd->se_cmd_list); 3994 list_del(&se_cmd->se_cmd_list);
3959 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3995 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3960 3996
3961 return 0; 3997 se_cmd->se_tfo->release_cmd(se_cmd);
3998}
3999
4000/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
4001 * @se_sess: session to reference
4002 * @se_cmd: command descriptor to drop
4003 */
4004int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
4005{
4006 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
3962} 4007}
3963EXPORT_SYMBOL(target_put_sess_cmd); 4008EXPORT_SYMBOL(target_put_sess_cmd);
3964 4009
@@ -4174,7 +4219,7 @@ check_cond:
4174 4219
4175static int transport_clear_lun_thread(void *p) 4220static int transport_clear_lun_thread(void *p)
4176{ 4221{
4177 struct se_lun *lun = (struct se_lun *)p; 4222 struct se_lun *lun = p;
4178 4223
4179 __transport_clear_lun_from_sessions(lun); 4224 __transport_clear_lun_from_sessions(lun);
4180 complete(&lun->lun_shutdown_comp); 4225 complete(&lun->lun_shutdown_comp);
@@ -4353,6 +4398,7 @@ int transport_send_check_condition_and_sense(
4353 case TCM_NON_EXISTENT_LUN: 4398 case TCM_NON_EXISTENT_LUN:
4354 /* CURRENT ERROR */ 4399 /* CURRENT ERROR */
4355 buffer[offset] = 0x70; 4400 buffer[offset] = 0x70;
4401 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4356 /* ILLEGAL REQUEST */ 4402 /* ILLEGAL REQUEST */
4357 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4403 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4358 /* LOGICAL UNIT NOT SUPPORTED */ 4404 /* LOGICAL UNIT NOT SUPPORTED */
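Each CHECK CONDITION case below gains the same one-line fix: byte 7 of fixed-format sense data (SPC_ADD_SENSE_LEN_OFFSET) is the additional sense length, and setting it to 10 declares that bytes 8..17 follow, i.e. an 18-byte sense buffer that actually covers the ASC/ASCQ fields written further down. The common shape of every case is now, assuming the ASC/ASCQ offset macros from target_core_base.h:

    buffer[offset] = 0x70;                            /* current error, fixed format */
    buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;     /* 8 header bytes + 10 = 18 total */
    buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
    buffer[offset+SPC_ASC_KEY_OFFSET] = asc;          /* additional sense code, byte 12 */
    buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;        /* qualifier, byte 13 */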
@@ -4362,6 +4408,7 @@ int transport_send_check_condition_and_sense(
4362 case TCM_SECTOR_COUNT_TOO_MANY: 4408 case TCM_SECTOR_COUNT_TOO_MANY:
4363 /* CURRENT ERROR */ 4409 /* CURRENT ERROR */
4364 buffer[offset] = 0x70; 4410 buffer[offset] = 0x70;
4411 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4365 /* ILLEGAL REQUEST */ 4412 /* ILLEGAL REQUEST */
4366 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4413 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4367 /* INVALID COMMAND OPERATION CODE */ 4414 /* INVALID COMMAND OPERATION CODE */
@@ -4370,6 +4417,7 @@ int transport_send_check_condition_and_sense(
4370 case TCM_UNKNOWN_MODE_PAGE: 4417 case TCM_UNKNOWN_MODE_PAGE:
4371 /* CURRENT ERROR */ 4418 /* CURRENT ERROR */
4372 buffer[offset] = 0x70; 4419 buffer[offset] = 0x70;
4420 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4373 /* ILLEGAL REQUEST */ 4421 /* ILLEGAL REQUEST */
4374 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4422 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4375 /* INVALID FIELD IN CDB */ 4423 /* INVALID FIELD IN CDB */
@@ -4378,6 +4426,7 @@ int transport_send_check_condition_and_sense(
4378 case TCM_CHECK_CONDITION_ABORT_CMD: 4426 case TCM_CHECK_CONDITION_ABORT_CMD:
4379 /* CURRENT ERROR */ 4427 /* CURRENT ERROR */
4380 buffer[offset] = 0x70; 4428 buffer[offset] = 0x70;
4429 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4381 /* ABORTED COMMAND */ 4430 /* ABORTED COMMAND */
4382 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4431 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4383 /* BUS DEVICE RESET FUNCTION OCCURRED */ 4432 /* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -4387,6 +4436,7 @@ int transport_send_check_condition_and_sense(
4387 case TCM_INCORRECT_AMOUNT_OF_DATA: 4436 case TCM_INCORRECT_AMOUNT_OF_DATA:
4388 /* CURRENT ERROR */ 4437 /* CURRENT ERROR */
4389 buffer[offset] = 0x70; 4438 buffer[offset] = 0x70;
4439 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4390 /* ABORTED COMMAND */ 4440 /* ABORTED COMMAND */
4391 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4441 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4392 /* WRITE ERROR */ 4442 /* WRITE ERROR */
@@ -4397,6 +4447,7 @@ int transport_send_check_condition_and_sense(
4397 case TCM_INVALID_CDB_FIELD: 4447 case TCM_INVALID_CDB_FIELD:
4398 /* CURRENT ERROR */ 4448 /* CURRENT ERROR */
4399 buffer[offset] = 0x70; 4449 buffer[offset] = 0x70;
4450 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4400 /* ABORTED COMMAND */ 4451 /* ABORTED COMMAND */
4401 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4452 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4402 /* INVALID FIELD IN CDB */ 4453 /* INVALID FIELD IN CDB */
@@ -4405,6 +4456,7 @@ int transport_send_check_condition_and_sense(
4405 case TCM_INVALID_PARAMETER_LIST: 4456 case TCM_INVALID_PARAMETER_LIST:
4406 /* CURRENT ERROR */ 4457 /* CURRENT ERROR */
4407 buffer[offset] = 0x70; 4458 buffer[offset] = 0x70;
4459 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4408 /* ABORTED COMMAND */ 4460 /* ABORTED COMMAND */
4409 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4461 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4410 /* INVALID FIELD IN PARAMETER LIST */ 4462 /* INVALID FIELD IN PARAMETER LIST */
@@ -4413,6 +4465,7 @@ int transport_send_check_condition_and_sense(
4413 case TCM_UNEXPECTED_UNSOLICITED_DATA: 4465 case TCM_UNEXPECTED_UNSOLICITED_DATA:
4414 /* CURRENT ERROR */ 4466 /* CURRENT ERROR */
4415 buffer[offset] = 0x70; 4467 buffer[offset] = 0x70;
4468 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4416 /* ABORTED COMMAND */ 4469 /* ABORTED COMMAND */
4417 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4470 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4418 /* WRITE ERROR */ 4471 /* WRITE ERROR */
@@ -4423,6 +4476,7 @@ int transport_send_check_condition_and_sense(
4423 case TCM_SERVICE_CRC_ERROR: 4476 case TCM_SERVICE_CRC_ERROR:
4424 /* CURRENT ERROR */ 4477 /* CURRENT ERROR */
4425 buffer[offset] = 0x70; 4478 buffer[offset] = 0x70;
4479 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4426 /* ABORTED COMMAND */ 4480 /* ABORTED COMMAND */
4427 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4481 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4428 /* PROTOCOL SERVICE CRC ERROR */ 4482 /* PROTOCOL SERVICE CRC ERROR */
@@ -4433,6 +4487,7 @@ int transport_send_check_condition_and_sense(
4433 case TCM_SNACK_REJECTED: 4487 case TCM_SNACK_REJECTED:
4434 /* CURRENT ERROR */ 4488 /* CURRENT ERROR */
4435 buffer[offset] = 0x70; 4489 buffer[offset] = 0x70;
4490 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4436 /* ABORTED COMMAND */ 4491 /* ABORTED COMMAND */
4437 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4492 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4438 /* READ ERROR */ 4493 /* READ ERROR */
@@ -4443,6 +4498,7 @@ int transport_send_check_condition_and_sense(
4443 case TCM_WRITE_PROTECTED: 4498 case TCM_WRITE_PROTECTED:
4444 /* CURRENT ERROR */ 4499 /* CURRENT ERROR */
4445 buffer[offset] = 0x70; 4500 buffer[offset] = 0x70;
4501 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4446 /* DATA PROTECT */ 4502 /* DATA PROTECT */
4447 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 4503 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4448 /* WRITE PROTECTED */ 4504 /* WRITE PROTECTED */
@@ -4451,6 +4507,7 @@ int transport_send_check_condition_and_sense(
4451 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 4507 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4452 /* CURRENT ERROR */ 4508 /* CURRENT ERROR */
4453 buffer[offset] = 0x70; 4509 buffer[offset] = 0x70;
4510 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4454 /* UNIT ATTENTION */ 4511 /* UNIT ATTENTION */
4455 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 4512 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4456 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 4513 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -4460,6 +4517,7 @@ int transport_send_check_condition_and_sense(
4460 case TCM_CHECK_CONDITION_NOT_READY: 4517 case TCM_CHECK_CONDITION_NOT_READY:
4461 /* CURRENT ERROR */ 4518 /* CURRENT ERROR */
4462 buffer[offset] = 0x70; 4519 buffer[offset] = 0x70;
4520 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4463 /* Not Ready */ 4521 /* Not Ready */
4464 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; 4522 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4465 transport_get_sense_codes(cmd, &asc, &ascq); 4523 transport_get_sense_codes(cmd, &asc, &ascq);
@@ -4470,6 +4528,7 @@ int transport_send_check_condition_and_sense(
4470 default: 4528 default:
4471 /* CURRENT ERROR */ 4529 /* CURRENT ERROR */
4472 buffer[offset] = 0x70; 4530 buffer[offset] = 0x70;
4531 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4473 /* ILLEGAL REQUEST */ 4532 /* ILLEGAL REQUEST */
4474 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4533 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4475 /* LOGICAL UNIT COMMUNICATION FAILURE */ 4534 /* LOGICAL UNIT COMMUNICATION FAILURE */
@@ -4545,11 +4604,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
4545 cmd->se_tfo->queue_status(cmd); 4604 cmd->se_tfo->queue_status(cmd);
4546} 4605}
4547 4606
4548/* transport_generic_do_tmr(): 4607static int transport_generic_do_tmr(struct se_cmd *cmd)
4549 *
4550 *
4551 */
4552int transport_generic_do_tmr(struct se_cmd *cmd)
4553{ 4608{
4554 struct se_device *dev = cmd->se_dev; 4609 struct se_device *dev = cmd->se_dev;
4555 struct se_tmr_req *tmr = cmd->se_tmr_req; 4610 struct se_tmr_req *tmr = cmd->se_tmr_req;
@@ -4597,7 +4652,7 @@ static int transport_processing_thread(void *param)
4597{ 4652{
4598 int ret; 4653 int ret;
4599 struct se_cmd *cmd; 4654 struct se_cmd *cmd;
4600 struct se_device *dev = (struct se_device *) param; 4655 struct se_device *dev = param;
4601 4656
4602 while (!kthread_should_stop()) { 4657 while (!kthread_should_stop()) {
4603 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, 4658 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
@@ -4607,8 +4662,6 @@ static int transport_processing_thread(void *param)
4607 goto out; 4662 goto out;
4608 4663
4609get_cmd: 4664get_cmd:
4610 __transport_execute_tasks(dev);
4611
4612 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); 4665 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4613 if (!cmd) 4666 if (!cmd)
4614 continue; 4667 continue;
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 50a480db7a6..3e12f6bcfa1 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -30,13 +30,11 @@
30#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
31 31
32#include <target/target_core_base.h> 32#include <target/target_core_base.h>
33#include <target/target_core_device.h> 33#include <target/target_core_fabric.h>
34#include <target/target_core_transport.h>
35#include <target/target_core_fabric_ops.h>
36#include <target/target_core_configfs.h> 34#include <target/target_core_configfs.h>
37 35
36#include "target_core_internal.h"
38#include "target_core_alua.h" 37#include "target_core_alua.h"
39#include "target_core_hba.h"
40#include "target_core_pr.h" 38#include "target_core_pr.h"
41#include "target_core_ua.h" 39#include "target_core_ua.h"
42 40
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 71fc9cea5dc..addc18f727e 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -39,12 +39,8 @@
39#include <scsi/fc_encode.h> 39#include <scsi/fc_encode.h>
40 40
41#include <target/target_core_base.h> 41#include <target/target_core_base.h>
42#include <target/target_core_transport.h> 42#include <target/target_core_fabric.h>
43#include <target/target_core_fabric_ops.h>
44#include <target/target_core_device.h>
45#include <target/target_core_tpg.h>
46#include <target/target_core_configfs.h> 43#include <target/target_core_configfs.h>
47#include <target/target_core_tmr.h>
48#include <target/configfs_macros.h> 44#include <target/configfs_macros.h>
49 45
50#include "tcm_fc.h" 46#include "tcm_fc.h"
@@ -367,6 +363,11 @@ static void ft_send_tm(struct ft_cmd *cmd)
367 struct ft_sess *sess; 363 struct ft_sess *sess;
368 u8 tm_func; 364 u8 tm_func;
369 365
366 transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
367 cmd->sess->se_sess, 0, DMA_NONE, 0,
368 &cmd->ft_sense_buffer[0]);
369 target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);
370
370 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); 371 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
371 372
372 switch (fcp->fc_tm_flags) { 373 switch (fcp->fc_tm_flags) {
@@ -420,7 +421,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
420 sess = cmd->sess; 421 sess = cmd->sess;
421 transport_send_check_condition_and_sense(&cmd->se_cmd, 422 transport_send_check_condition_and_sense(&cmd->se_cmd,
422 cmd->se_cmd.scsi_sense_reason, 0); 423 cmd->se_cmd.scsi_sense_reason, 0);
423 transport_generic_free_cmd(&cmd->se_cmd, 0);
424 ft_sess_put(sess); 424 ft_sess_put(sess);
425 return; 425 return;
426 } 426 }
@@ -536,7 +536,6 @@ static void ft_send_work(struct work_struct *work)
 {
 	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
 	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
-	struct se_cmd *se_cmd;
 	struct fcp_cmnd *fcp;
 	int data_dir = 0;
 	u32 data_len;
@@ -591,15 +590,6 @@ static void ft_send_work(struct work_struct *work)
 		data_len = ntohl(fcp->fc_dl);
 		cmd->cdb = fcp->fc_cdb;
 	}
-
-	se_cmd = &cmd->se_cmd;
-	/*
-	 * Initialize struct se_cmd descriptor from target_core_mod
-	 * infrastructure
-	 */
-	transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
-			      data_len, data_dir, task_attr,
-			      &cmd->ft_sense_buffer[0]);
 	/*
 	 * Check for FCP task management flags
 	 */
@@ -607,39 +597,20 @@ static void ft_send_work(struct work_struct *work)
 		ft_send_tm(cmd);
 		return;
 	}
-
 	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
-
 	cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
-	ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
+	/*
+	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
+	 * directly from ft_check_stop_free callback in response path.
+	 */
+	ret = target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
+				&cmd->ft_sense_buffer[0], cmd->lun, data_len,
+				task_attr, data_dir, 0);
+	pr_debug("r_ctl %x alloc target_submit_cmd %d\n", fh->fh_r_ctl, ret);
 	if (ret < 0) {
 		ft_dump_cmd(cmd, __func__);
-		transport_send_check_condition_and_sense(&cmd->se_cmd,
-			cmd->se_cmd.scsi_sense_reason, 0);
-		return;
-	}
-
-	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
-
-	pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
-	ft_dump_cmd(cmd, __func__);
-
-	if (ret == -ENOMEM) {
-		transport_send_check_condition_and_sense(se_cmd,
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-		transport_generic_free_cmd(se_cmd, 0);
-		return;
-	}
-	if (ret == -EINVAL) {
-		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-			ft_queue_status(se_cmd);
-		else
-			transport_send_check_condition_and_sense(se_cmd,
-				se_cmd->scsi_sense_reason, 0);
-		transport_generic_free_cmd(se_cmd, 0);
 		return;
 	}
-	transport_handle_cdb_direct(se_cmd);
 	return;
 
 err:
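
The tfc_cmd.c hunks above collapse the old multi-step submission path
(transport_init_se_cmd() + transport_lookup_cmd_lun() +
transport_generic_allocate_tasks() + transport_handle_cdb_direct(), each with its
own error handling) into a single target_submit_cmd() call. A hedged sketch of
what a fabric receive path looks like after the conversion; the "my_*" types are
hypothetical stand-ins, and the argument order mirrors the call in the diff:

	/* Sketch only: se_cmd, session, CDB, sense buffer, unpacked LUN,
	 * data length, task attribute, data direction, flags. */
	static void my_fabric_recv(struct my_cmd *cmd, u32 lun, u32 data_len,
				   int task_attr, int data_dir)
	{
		int ret;

		ret = target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess,
					cmd->cdb, &cmd->sense_buffer[0], lun,
					data_len, task_attr, data_dir, 0);
		if (ret < 0) {
			/* Target core now owns sense reporting and command
			 * release via the fabric's check_stop_free callback,
			 * so the old -ENOMEM/-EINVAL branches are gone. */
			return;
		}
	}
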
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 9402b7387ca..73852fbc857 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -41,12 +41,8 @@
 #include <scsi/libfc.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1369b1cb103..d8cabc21036 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -48,10 +48,7 @@
 #include <scsi/fc_encode.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 326921385af..4c0507cf808 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -40,10 +40,7 @@
 #include <scsi/libfc.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 6958594f2fc..9ae024025ff 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -268,7 +268,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 	struct dma_slave_config tx_conf = {
 		.dst_addr = uap->port.mapbase + UART01x_DR,
 		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
-		.direction = DMA_TO_DEVICE,
+		.direction = DMA_MEM_TO_DEV,
 		.dst_maxburst = uap->fifosize >> 1,
 	};
 	struct dma_chan *chan;
@@ -301,7 +301,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 	struct dma_slave_config rx_conf = {
 		.src_addr = uap->port.mapbase + UART01x_DR,
 		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
-		.direction = DMA_FROM_DEVICE,
+		.direction = DMA_DEV_TO_MEM,
 		.src_maxburst = uap->fifosize >> 1,
 	};
 
@@ -480,7 +480,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
 		return -EBUSY;
 	}
 
-	desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+	desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
@@ -676,7 +676,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
 		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
 	dma_dev = rxchan->device;
 	desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
-					DMA_FROM_DEVICE,
+					DMA_DEV_TO_MEM,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	/*
 	 * If the DMA engine is busy and cannot prepare a
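
The serial hunks here are part of the tree-wide move from the DMA-mapping enum
(DMA_TO_DEVICE/DMA_FROM_DEVICE) to dmaengine's own enum dma_transfer_direction
(DMA_MEM_TO_DEV/DMA_DEV_TO_MEM) for slave transfers. A minimal sketch of a
slave-TX configuration under the new names (fifo_addr and the burst size are
placeholder values, not the pl011 ones):

	#include <linux/dmaengine.h>

	static int my_setup_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
	{
		struct dma_slave_config tx_conf = {
			.dst_addr	= fifo_addr,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction	= DMA_MEM_TO_DEV, /* was DMA_TO_DEVICE */
			.dst_maxburst	= 16,
		};

		return dmaengine_slave_config(chan, &tx_conf);
	}
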
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index de0f613ed6f..17ae65762d1 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -764,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
 	sg_dma_address(sg) = priv->rx_buf_dma;
 
 	desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
-			sg, 1, DMA_FROM_DEVICE,
+			sg, 1, DMA_DEV_TO_MEM,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	if (!desc)
@@ -923,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
 	}
 
 	desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
-					priv->sg_tx_p, nent, DMA_TO_DEVICE,
+					priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9e62349b3d9..75085795528 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1339,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s)
 		struct dma_async_tx_descriptor *desc;
 
 		desc = chan->device->device_prep_slave_sg(chan,
-			sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
+			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 
 		if (desc) {
 			s->desc_rx[i] = desc;
@@ -1454,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work)
 	BUG_ON(!sg_dma_len(sg));
 
 	desc = chan->device->device_prep_slave_sg(chan,
-			sg, s->sg_len_tx, DMA_TO_DEVICE,
+			sg, s->sg_len_tx, DMA_MEM_TO_DEV,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		/* switch to PIO */
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 32793ce3d9e..9c2cc463389 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -183,7 +183,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
 	}
 
 	irq = irq_of_parse_and_map(dn, 0);
-	if (irq == NO_IRQ) {
+	if (!irq) {
 		printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
 		rv = -EBUSY;
 		goto err_irq;
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index a163632877a..97cb45916c4 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -84,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
 	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
 	struct dma_chan *dma_chan = ux500_channel->dma_chan;
 	struct dma_async_tx_descriptor *dma_desc;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	struct scatterlist sg;
 	struct dma_slave_config slave_conf;
 	enum dma_slave_buswidth addr_width;
@@ -104,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
 	sg_dma_address(&sg) = dma_addr;
 	sg_dma_len(&sg) = len;
 
-	direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
 	addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
 					DMA_SLAVE_BUSWIDTH_4_BYTES;
 
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b51fcd80d24..72339bd6fca 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -772,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data)
 	struct dma_async_tx_descriptor *desc;
 	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
 	struct device *dev = usbhs_priv_to_dev(priv);
-	enum dma_data_direction dir;
+	enum dma_transfer_direction dir;
 	dma_cookie_t cookie;
 
-	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 
 	sg_init_table(&sg, 1);
 	sg_set_page(&sg, virt_to_page(pkt->dma),
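
Note that the two direction enums coexist after this conversion: streaming-mapping
calls such as dma_map_sg()/dma_unmap_sg() keep enum dma_data_direction, while the
prep call takes enum dma_transfer_direction, as seen in the pl011 hunk above where
dma_unmap_sg() still passes DMA_TO_DEVICE. A short sketch of the pairing (names
hypothetical):

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	static struct dma_async_tx_descriptor *
	my_prep_tx(struct dma_chan *chan, struct device *dev,
		   struct scatterlist *sg)
	{
		/* Mapping API: enum dma_data_direction. */
		if (dma_map_sg(dev, sg, 1, DMA_TO_DEVICE) != 1)
			return NULL;

		/* dmaengine prep: enum dma_transfer_direction. */
		return chan->device->device_prep_slave_sg(chan, sg, 1,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
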
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 882a51fe7b3..9dab1f51dd4 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -856,9 +856,9 @@ static const struct file_operations vhost_net_fops = {
 };
 
 static struct miscdevice vhost_net_misc = {
-	MISC_DYNAMIC_MINOR,
-	"vhost-net",
-	&vhost_net_fops,
+	.minor = VHOST_NET_MINOR,
+	.name = "vhost-net",
+	.fops = &vhost_net_fops,
 };
 
 static int vhost_net_init(void)
@@ -879,3 +879,5 @@ MODULE_VERSION("0.0.1");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Michael S. Tsirkin");
 MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
+MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
+MODULE_ALIAS("devname:vhost-net");
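
The vhost-net hunks switch the miscdevice to designated initializers and a fixed
minor, and add aliases so udev/devtmpfs can create the node at module load and
autoload the module when it is opened. A minimal sketch of the same pattern (the
minor number and names are hypothetical, not the vhost values):

	#include <linux/fs.h>
	#include <linux/miscdevice.h>
	#include <linux/module.h>

	#define MY_MINOR 250	/* hypothetical fixed minor, cf. VHOST_NET_MINOR */

	static const struct file_operations my_fops = {
		.owner = THIS_MODULE,
	};

	static struct miscdevice my_misc = {
		.minor	= MY_MINOR,
		.name	= "my-dev",
		.fops	= &my_fops,
	};

	static int __init my_init(void)
	{
		return misc_register(&my_misc);
	}
	module_init(my_init);

	static void __exit my_exit(void)
	{
		misc_deregister(&my_misc);
	}
	module_exit(my_exit);

	/* Node creation at load time and autoload on open. */
	MODULE_ALIAS_MISCDEV(MY_MINOR);
	MODULE_ALIAS("devname:my-dev");
	MODULE_LICENSE("GPL");
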
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index e3406ab3130..727a5149d81 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -245,6 +245,7 @@ struct mx3fb_data {
 
 	uint32_t		h_start_width;
 	uint32_t		v_start_width;
+	enum disp_data_mapping	disp_data_fmt;
 };
 
 struct dma_chan_request {
@@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r
 	__raw_writel(value, mx3fb->reg_base + reg);
 }
 
-static const uint32_t di_mappings[] = {
-	0x1600AAAA, 0x00E05555, 0x00070000, 3,	/* RGB888 */
-	0x0005000F, 0x000B000F, 0x0011000F, 1,	/* RGB666 */
-	0x0011000F, 0x000B000F, 0x0005000F, 1,	/* BGR666 */
-	0x0004003F, 0x000A000F, 0x000F003F, 1	/* RGB565 */
+struct di_mapping {
+	uint32_t b0, b1, b2;
+};
+
+static const struct di_mapping di_mappings[] = {
+	[IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
+	[IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
+	[IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
 };
 
 static void sdc_fb_init(struct mx3fb_info *fbi)
@@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
 	/* This enables the channel */
 	if (mx3_fbi->cookie < 0) {
 		mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
-		      &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+		      &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 		if (!mx3_fbi->txd) {
 			dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
 				dma_chan->chan_id);
@@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
  * @pixel_clk:	desired pixel clock frequency in Hz.
  * @width:	width of panel in pixels.
  * @height:	height of panel in pixels.
- * @pixel_fmt:	pixel format of buffer as FOURCC ASCII code.
  * @h_start_width: number of pixel clocks between the HSYNC signal pulse
  *		and the start of valid data.
  * @h_sync_width: width of the HSYNC signal in units of pixel clocks.
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
 static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
 			  uint32_t pixel_clk,
 			  uint16_t width, uint16_t height,
-			  enum pixel_fmt pixel_fmt,
 			  uint16_t h_start_width, uint16_t h_sync_width,
 			  uint16_t h_end_width, uint16_t v_start_width,
 			  uint16_t v_sync_width, uint16_t v_end_width,
@@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
 	uint32_t old_conf;
 	uint32_t div;
 	struct clk *ipu_clk;
+	const struct di_mapping *map;
 
 	dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
 
@@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
 		sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
 	mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
 
-	switch (pixel_fmt) {
-	case IPU_PIX_FMT_RGB24:
-		mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
-		mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-				((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
-		break;
-	case IPU_PIX_FMT_RGB666:
-		mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
-		mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-				((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
-		break;
-	case IPU_PIX_FMT_BGR666:
-		mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
-		mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-				((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
-		break;
-	default:
-		mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
-		mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-				((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
-		break;
-	}
+	map = &di_mappings[mx3fb->disp_data_fmt];
+	mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP);
+	mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP);
+	mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP);
 
 	spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
 
@@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock)
 	if (sdc_init_panel(mx3fb, mode,
 			   (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
 			   fbi->var.xres, fbi->var.yres,
-			   (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
-			   IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
 			   fbi->var.left_margin,
 			   fbi->var.hsync_len,
 			   fbi->var.right_margin +
@@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
 	async_tx_ack(mx3_fbi->txd);
 
 	txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
-		mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+		mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 	if (!txd) {
 		dev_err(fbi->device,
 			"Error preparing a DMA transaction descriptor.\n");
@@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
 	const struct fb_videomode *mode;
 	int ret, num_modes;
 
+	if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) {
+		dev_err(dev, "Illegal display data format %d\n",
+			mx3fb_pdata->disp_data_fmt);
+		return -EINVAL;
+	}
+
 	ichan->client = mx3fb;
 	irq = ichan->eof_irq;
 
@@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
 	mx3fbi->mx3fb = mx3fb;
 	mx3fbi->blank = FB_BLANK_NORMAL;
 
+	mx3fb->disp_data_fmt = mx3fb_pdata->disp_data_fmt;
+
 	init_completion(&mx3fbi->flip_cmpl);
 	disable_irq(ichan->eof_irq);
 	dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
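
The mx3fb rework above replaces a flat uint32_t array addressed by magic offsets
with a struct table using designated array indices keyed by the platform-data
enum, plus a bounds check before the table is ever used. The same pattern in
isolation (the enum and helper names are illustrative; the register values are
the ones from the diff):

	#include <linux/errno.h>
	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/types.h>

	enum my_fmt { MY_FMT_RGB666, MY_FMT_RGB565, MY_FMT_RGB888 };

	struct my_mapping {
		uint32_t b0, b1, b2;
	};

	/* Designated indices tie each row to its enum value, so the table
	 * stays correct even if the enum order changes. */
	static const struct my_mapping my_mappings[] = {
		[MY_FMT_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
		[MY_FMT_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
		[MY_FMT_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
	};

	static int my_lookup(unsigned int fmt, const struct my_mapping **map)
	{
		/* Validate untrusted platform data before indexing. */
		if (fmt >= ARRAY_SIZE(my_mappings))
			return -EINVAL;
		*map = &my_mappings[fmt];
		return 0;
	}
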
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index ba6eda4b514..0edb91c0de6 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -1,5 +1,6 @@
 #include <linux/bio.h>
 #include <linux/io.h>
+#include <linux/export.h>
 #include <xen/page.h>
 
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
@@ -11,3 +12,4 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
 		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
 }
+EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 3832e303c33..596e6a7b17d 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -221,7 +221,7 @@ static int register_balloon(struct device *dev)
 {
 	int i, error;
 
-	error = bus_register(&balloon_subsys);
+	error = subsys_system_register(&balloon_subsys, NULL);
 	if (error)
 		return error;
 
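
The balloon hunk is part of the sysdev removal: subsystems that used to carry a
struct sysdev_class now register a plain struct bus_type through
subsys_system_register(), which creates the /sys/devices/system/<name> hierarchy
and hooks up an optional attribute-group list in one call. A hedged sketch (the
"my_*" names are placeholders):

	#include <linux/device.h>

	static struct bus_type my_subsys = {
		.name		= "my_subsys",
		.dev_name	= "my_subsys",
	};

	static int __init my_subsys_init(void)
	{
		/* Registers the bus and creates /sys/devices/system/my_subsys;
		 * the second argument may point to default attribute groups. */
		return subsys_system_register(&my_subsys, NULL);
	}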