aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2012-06-25 13:06:12 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2012-06-25 13:10:36 -0400
commit7b0cfee1a24efdfe0235bac62e53f686fe8a8e24 (patch)
treeeeeb8cc3bf7be5ec0e54b7c4f3808ef88ecca012 /drivers
parent9756fe38d10b2bf90c81dc4d2f17d5632e135364 (diff)
parent6b16351acbd415e66ba16bf7d473ece1574cf0bc (diff)
Merge tag 'v3.5-rc4' into drm-intel-next-queued
I want to merge the "no more fake agp on gen6+" patches into drm-intel-next (well, the last pieces). But a patch in 3.5-rc4 also adds a new use of dev->agp. Hence the backmarge to sort this out, for otherwise drm-intel-next merged into Linus' tree would conflict in the relevant code, things would compile but nicely OOPS at driver load :( Conflicts in this merge are just simple cases of "both branches changed/added lines at the same place". The only tricky part is to keep the order correct wrt the unwind code in case of errors in intel_ringbuffer.c (and the MI_DISPLAY_FLIP #defines in i915_reg.h together, obviously). Conflicts: drivers/gpu/drm/i915/i915_reg.h drivers/gpu/drm/i915/intel_ringbuffer.c Signed-Off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/battery.c10
-rw-r--r--drivers/acpi/bus.c88
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/processor_perflib.c30
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c57
-rw-r--r--drivers/acpi/video.c33
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/atm/solos-pci.c4
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/regmap/regmap.c10
-rw-r--r--drivers/base/soc.c2
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c4
-rw-r--r--drivers/bcma/driver_pci.c6
-rw-r--r--drivers/bcma/sprom.c4
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c276
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h48
-rw-r--r--drivers/char/agp/intel-agp.c1
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/hw_random/atmel-rng.c9
-rw-r--r--drivers/clk/spear/clk-aux-synth.c2
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c2
-rw-r--r--drivers/clk/spear/clk-vco-pll.c2
-rw-r--r--drivers/clk/spear/clk.c2
-rw-r--r--drivers/clk/spear/clk.h2
-rw-r--r--drivers/clk/spear/spear1310_clock.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/spear/spear3xx_clock.c2
-rw-r--r--drivers/clk/spear/spear6xx_clock.c2
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/em_sti.c406
-rw-r--r--drivers/clocksource/sh_cmt.c26
-rw-r--r--drivers/clocksource/sh_mtu2.c6
-rw-r--r--drivers/clocksource/sh_tmu.c16
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/pl330.c30
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/i7core_edac.c15
-rw-r--r--drivers/edac/mce_amd.h2
-rw-r--r--drivers/edac/mpc85xx_edac.c3
-rw-r--r--drivers/edac/sb_edac.c10
-rw-r--r--drivers/extcon/extcon-max8997.c5
-rw-r--r--drivers/extcon/extcon_class.c2
-rw-r--r--drivers/extcon/extcon_gpio.c2
-rw-r--r--drivers/gpio/gpio-samsung.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c19
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c5
-rw-r--r--drivers/gpu/drm/drm_edid.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c12
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c38
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h43
-rw-r--r--drivers/gpu/drm/i915/intel_display.c21
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c60
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c21
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c19
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c385
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c49
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h12
-rw-r--r--drivers/gpu/drm/radeon/ni.c386
-rw-r--r--drivers/gpu/drm/radeon/nid.h11
-rw-r--r--drivers/gpu/drm/radeon/r600.c215
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c42
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c8
-rw-r--r--drivers/gpu/drm/radeon/r600d.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c10
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c12
-rw-r--r--drivers/gpu/drm/radeon/rv770.c297
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h7
-rw-r--r--drivers/gpu/drm/radeon/si.c477
-rw-r--r--drivers/gpu/drm/radeon/si_reg.h72
-rw-r--r--drivers/gpu/drm/radeon/sid.h19
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c14
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c15
-rw-r--r--drivers/gpu/drm/via/via_map.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c2
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c27
-rw-r--r--drivers/hwmon/applesmc.c4
-rw-r--r--drivers/hwmon/coretemp.c33
-rw-r--r--drivers/hwmon/emc2103.c12
-rw-r--r--drivers/i2c/muxes/Kconfig12
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c279
-rw-r--r--drivers/ide/icside.c17
-rw-r--r--drivers/ide/ide-cs.c3
-rw-r--r--drivers/iio/Kconfig3
-rw-r--r--drivers/iio/industrialio-core.c16
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c21
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h8
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c21
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c27
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c64
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c17
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h1
-rw-r--r--drivers/iommu/amd_iommu.c71
-rw-r--r--drivers/iommu/amd_iommu_init.c13
-rw-r--r--drivers/iommu/amd_iommu_types.h3
-rw-r--r--drivers/leds/Kconfig4
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/led-core.c7
-rw-r--r--drivers/md/dm-mpath.c47
-rw-r--r--drivers/md/dm-thin-metadata.c136
-rw-r--r--drivers/md/dm-thin-metadata.h13
-rw-r--r--drivers/md/dm-thin.c203
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c2
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/media/video/pms.c1
-rw-r--r--drivers/message/fusion/mptbase.c13
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/mfd/db8500-prcmu.c1
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/misc/mei/interrupt.c2
-rw-r--r--drivers/misc/mei/main.c9
-rw-r--r--drivers/misc/mei/wd.c2
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h14
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/dw_mmc.c36
-rw-r--r--drivers/mmc/host/mmci.c19
-rw-r--r--drivers/mmc/host/mxs-mmc.c2
-rw-r--r--drivers/mmc/host/omap.c18
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm63xxpart.c41
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c18
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c7
-rw-r--r--drivers/mtd/devices/docg3.c40
-rw-r--r--drivers/mtd/devices/m25p80.c5
-rw-r--r--drivers/mtd/devices/spear_smi.c14
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c2
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c13
-rw-r--r--drivers/mtd/maps/pci.c13
-rw-r--r--drivers/mtd/maps/scb2_flash.c15
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtdcore.c57
-rw-r--r--drivers/mtd/mtdoops.c22
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/nand/Kconfig42
-rw-r--r--drivers/mtd/nand/alauda.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c14
-rw-r--r--drivers/mtd/nand/au1550nd.c2
-rw-r--r--drivers/mtd/nand/bcm_umi_bch.c14
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c4
-rw-r--r--drivers/mtd/nand/cafe_nand.c35
-rw-r--r--drivers/mtd/nand/cs553x_nand.c1
-rw-r--r--drivers/mtd/nand/denali.c38
-rw-r--r--drivers/mtd/nand/docg4.c22
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c37
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c47
-rw-r--r--drivers/mtd/nand/fsmc_nand.c26
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h42
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c27
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c184
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h6
-rw-r--r--drivers/mtd/nand/h1910.c1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c6
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c1
-rw-r--r--drivers/mtd/nand/mxc_nand.c636
-rw-r--r--drivers/mtd/nand/nand_base.c233
-rw-r--r--drivers/mtd/nand/nand_bbt.c1
-rw-r--r--drivers/mtd/nand/nand_ids.c6
-rw-r--r--drivers/mtd/nand/nandsim.c28
-rw-r--r--drivers/mtd/nand/omap2.c253
-rw-r--r--drivers/mtd/nand/pasemi_nand.c1
-rw-r--r--drivers/mtd/nand/plat_nand.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/nand/r852.c22
-rw-r--r--drivers/mtd/nand/sh_flctl.c8
-rw-r--r--drivers/mtd/nand/sm_common.c9
-rw-r--r--drivers/mtd/onenand/onenand_base.c6
-rw-r--r--drivers/mtd/ubi/debug.c12
-rw-r--r--drivers/mtd/ubi/wl.c17
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/bonding/bond_sysfs.c8
-rw-r--r--drivers/net/can/c_can/c_can.c16
-rw-r--r--drivers/net/can/c_can/c_can.h1
-rw-r--r--drivers/net/can/cc770/cc770_platform.c2
-rw-r--r--drivers/net/dummy.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c27
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c5
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c22
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c15
-rw-r--r--drivers/net/ethernet/marvell/sky2.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c9
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c11
-rw-r--r--drivers/net/ethernet/rdc/r6040.c15
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c24
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c9
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c8
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h63
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c12
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/Makefile4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1898
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c12
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/usb/asix.c3
-rw-r--r--drivers/net/usb/mcs7830.c25
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/sierra_net.c14
-rw-r--r--drivers/net/virtio_net.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h178
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c16
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/main.c21
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c20
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig8
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c9
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c22
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c13
-rw-r--r--drivers/net/wireless/mwifiex/fw.h6
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c13
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c2
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/nfc/pn544_hci.c2
-rw-r--r--drivers/pci/pci.c5
-rw-r--r--drivers/pci/quirks.c26
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx.c34
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c13
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c45
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/platform/x86/acer-wmi.c24
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/apple-gmux.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c308
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c34
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c10
-rw-r--r--drivers/platform/x86/ideapad-laptop.c9
-rw-r--r--drivers/platform/x86/sony-laptop.c1498
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c141
-rw-r--r--drivers/platform/x86/xo1-rfkill.c13
-rw-r--r--drivers/power/Kconfig10
-rw-r--r--drivers/power/ab8500_btemp.c12
-rw-r--r--drivers/power/ab8500_charger.c13
-rw-r--r--drivers/power/ab8500_fg.c12
-rw-r--r--drivers/power/charger-manager.c392
-rw-r--r--drivers/power/ds2781_battery.c20
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17042_battery.c148
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/sbs-battery.c2
-rw-r--r--drivers/power/smb347-charger.c712
-rw-r--r--drivers/rapidio/Kconfig14
-rw-r--r--drivers/rapidio/devices/Makefile3
-rw-r--r--drivers/rapidio/devices/tsi721.c211
-rw-r--r--drivers/rapidio/devices/tsi721.h105
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c823
-rw-r--r--drivers/rapidio/rio.c81
-rw-r--r--drivers/regulator/ab8500.c22
-rw-r--r--drivers/regulator/anatop-regulator.c2
-rw-r--r--drivers/regulator/core.c3
-rw-r--r--drivers/regulator/db8500-prcmu.c40
-rw-r--r--drivers/regulator/gpio-regulator.c16
-rw-r--r--drivers/regulator/max8649.c1
-rw-r--r--drivers/regulator/palmas-regulator.c7
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c10
-rw-r--r--drivers/rtc/rtc-cmos.c9
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfad_attr.c17
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c18
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c173
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c39
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c122
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c21
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c200
-rw-r--r--drivers/scsi/fcoe/fcoe.h8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c159
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c832
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c13
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h78
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c615
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c173
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c4972
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1004
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c1919
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h82
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c134
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h28
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c95
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c111
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c738
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h192
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c78
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_lib.c11
-rw-r--r--drivers/scsi/scsi_pm.c5
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_wait_scan.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/staging/comedi/drivers.c5
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c2
-rw-r--r--drivers/staging/iio/Documentation/device.txt2
-rw-r--r--drivers/staging/iio/adc/Kconfig1
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c3
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c10
-rw-r--r--drivers/staging/ramster/zcache-main.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c10
-rw-r--r--drivers/target/sbp/sbp_target.c8
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_file.c70
-rw-r--r--drivers/target/target_core_file.h1
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--drivers/tty/amiserial.c14
-rw-r--r--drivers/tty/cyclades.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c31
-rw-r--r--drivers/tty/n_r3964.c11
-rw-r--r--drivers/tty/pty.c25
-rw-r--r--drivers/tty/serial/8250/8250.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c45
-rw-r--r--drivers/tty/serial/crisv10.c8
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c38
-rw-r--r--drivers/tty/synclink.c4
-rw-r--r--drivers/tty/synclink_gt.c4
-rw-r--r--drivers/tty/synclinkmp.c4
-rw-r--r--drivers/tty/tty_io.c67
-rw-r--r--drivers/tty/tty_ldisc.c67
-rw-r--r--drivers/tty/tty_mutex.c60
-rw-r--r--drivers/tty/tty_port.c6
-rw-r--r--drivers/usb/class/cdc-acm.c8
-rw-r--r--drivers/usb/class/cdc-wdm.c9
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/message.c3
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c6
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h4
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-omap.c168
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-sh.c3
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c4
-rw-r--r--drivers/usb/host/ohci-hub.c2
-rw-r--r--drivers/usb/host/xhci-mem.c74
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/musb/davinci.c1
-rw-r--r--drivers/usb/musb/davinci.h4
-rw-r--r--drivers/usb/musb/musb_gadget.c1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/generic.c10
-rw-r--r--drivers/usb/serial/mct_u232.c13
-rw-r--r--drivers/usb/serial/mos7840.c2
-rw-r--r--drivers/usb/serial/option.c96
-rw-r--r--drivers/usb/serial/qcserial.c6
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/usb-serial.c12
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/video/Kconfig35
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/auo_k1900fb.c198
-rw-r--r--drivers/video/auo_k1901fb.c251
-rw-r--r--drivers/video/auo_k190x.c1046
-rw-r--r--drivers/video/auo_k190x.h129
-rw-r--r--drivers/video/backlight/Kconfig2
-rw-r--r--drivers/video/backlight/ili9320.c2
-rw-r--r--drivers/video/bfin_adv7393fb.c49
-rw-r--r--drivers/video/broadsheetfb.c2
-rw-r--r--drivers/video/cobalt_lcdfb.c45
-rw-r--r--drivers/video/console/Kconfig14
-rw-r--r--drivers/video/ep93xx-fb.c32
-rw-r--r--drivers/video/exynos/exynos_dp_core.c69
-rw-r--r--drivers/video/exynos/exynos_dp_core.h3
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c45
-rw-r--r--drivers/video/exynos/exynos_dp_reg.h29
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c49
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c36
-rw-r--r--drivers/video/exynos/s6e8ax0.c15
-rw-r--r--drivers/video/fb_defio.c6
-rw-r--r--drivers/video/fbsysfs.c2
-rw-r--r--drivers/video/fsl-diu-fb.c1
-rw-r--r--drivers/video/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/mb862xx/mb862xx-i2c.c2
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/mbx/mbxfb.c4
-rw-r--r--drivers/video/mxsfb.c13
-rw-r--r--drivers/video/omap/Kconfig8
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c7
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c107
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c8
-rw-r--r--drivers/video/omap2/displays/panel-taal.c90
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c76
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c22
-rw-r--r--drivers/video/omap2/dss/Kconfig13
-rw-r--r--drivers/video/omap2/dss/apply.c134
-rw-r--r--drivers/video/omap2/dss/core.c254
-rw-r--r--drivers/video/omap2/dss/dispc.c747
-rw-r--r--drivers/video/omap2/dss/dispc.h72
-rw-r--r--drivers/video/omap2/dss/display.c49
-rw-r--r--drivers/video/omap2/dss/dpi.c75
-rw-r--r--drivers/video/omap2/dss/dsi.c404
-rw-r--r--drivers/video/omap2/dss/dss.c67
-rw-r--r--drivers/video/omap2/dss/dss.h151
-rw-r--r--drivers/video/omap2/dss/dss_features.c30
-rw-r--r--drivers/video/omap2/dss/dss_features.h5
-rw-r--r--drivers/video/omap2/dss/hdmi.c443
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c236
-rw-r--r--drivers/video/omap2/dss/manager.c19
-rw-r--r--drivers/video/omap2/dss/overlay.c16
-rw-r--r--drivers/video/omap2/dss/rfbi.c84
-rw-r--r--drivers/video/omap2/dss/sdi.c63
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h32
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c480
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h161
-rw-r--r--drivers/video/omap2/dss/venc.c133
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c17
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c12
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h1
-rw-r--r--drivers/video/omap2/vrfb.c4
-rw-r--r--drivers/video/pxa3xx-gcu.c5
-rw-r--r--drivers/video/s3c-fb.c160
-rw-r--r--drivers/video/savage/savagefb_driver.c10
-rw-r--r--drivers/video/sh_mobile_hdmi.c219
-rw-r--r--drivers/video/sis/init.h45
-rw-r--r--drivers/video/sis/sis_main.c41
-rw-r--r--drivers/video/skeletonfb.c2
-rw-r--r--drivers/video/smscufx.c4
-rw-r--r--drivers/video/udlfb.c2
-rw-r--r--drivers/video/via/viafbdev.c34
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/xen/events.c9
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/tmem.c8
561 files changed, 27568 insertions, 7936 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 47768ff87343..80998958cf45 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -208,7 +208,7 @@ config ACPI_IPMI
208 208
209config ACPI_HOTPLUG_CPU 209config ACPI_HOTPLUG_CPU
210 bool 210 bool
211 depends on ACPI_PROCESSOR && HOTPLUG_CPU 211 depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
212 select ACPI_CONTAINER 212 select ACPI_CONTAINER
213 default y 213 default y
214 214
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 86933ca8b472..7dd3f9fb9f3f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
643 643
644static void acpi_battery_refresh(struct acpi_battery *battery) 644static void acpi_battery_refresh(struct acpi_battery *battery)
645{ 645{
646 int power_unit;
647
646 if (!battery->bat.dev) 648 if (!battery->bat.dev)
647 return; 649 return;
648 650
651 power_unit = battery->power_unit;
652
649 acpi_battery_get_info(battery); 653 acpi_battery_get_info(battery);
650 /* The battery may have changed its reporting units. */ 654
655 if (power_unit == battery->power_unit)
656 return;
657
658 /* The battery has changed its reporting units. */
651 sysfs_remove_battery(battery); 659 sysfs_remove_battery(battery);
652 sysfs_add_battery(battery); 660 sysfs_add_battery(battery);
653} 661}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3188da3df8da..adceafda9c17 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
182 Power Management 182 Power Management
183 -------------------------------------------------------------------------- */ 183 -------------------------------------------------------------------------- */
184 184
185static const char *state_string(int state)
186{
187 switch (state) {
188 case ACPI_STATE_D0:
189 return "D0";
190 case ACPI_STATE_D1:
191 return "D1";
192 case ACPI_STATE_D2:
193 return "D2";
194 case ACPI_STATE_D3_HOT:
195 return "D3hot";
196 case ACPI_STATE_D3_COLD:
197 return "D3";
198 default:
199 return "(unknown)";
200 }
201}
202
185static int __acpi_bus_get_power(struct acpi_device *device, int *state) 203static int __acpi_bus_get_power(struct acpi_device *device, int *state)
186{ 204{
187 int result = 0; 205 int result = ACPI_STATE_UNKNOWN;
188 acpi_status status = 0;
189 unsigned long long psc = 0;
190 206
191 if (!device || !state) 207 if (!device || !state)
192 return -EINVAL; 208 return -EINVAL;
193 209
194 *state = ACPI_STATE_UNKNOWN; 210 if (!device->flags.power_manageable) {
195
196 if (device->flags.power_manageable) {
197 /*
198 * Get the device's power state either directly (via _PSC) or
199 * indirectly (via power resources).
200 */
201 if (device->power.flags.power_resources) {
202 result = acpi_power_get_inferred_state(device, state);
203 if (result)
204 return result;
205 } else if (device->power.flags.explicit_get) {
206 status = acpi_evaluate_integer(device->handle, "_PSC",
207 NULL, &psc);
208 if (ACPI_FAILURE(status))
209 return -ENODEV;
210 *state = (int)psc;
211 }
212 } else {
213 /* TBD: Non-recursive algorithm for walking up hierarchy. */ 211 /* TBD: Non-recursive algorithm for walking up hierarchy. */
214 *state = device->parent ? 212 *state = device->parent ?
215 device->parent->power.state : ACPI_STATE_D0; 213 device->parent->power.state : ACPI_STATE_D0;
214 goto out;
215 }
216
217 /*
218 * Get the device's power state either directly (via _PSC) or
219 * indirectly (via power resources).
220 */
221 if (device->power.flags.explicit_get) {
222 unsigned long long psc;
223 acpi_status status = acpi_evaluate_integer(device->handle,
224 "_PSC", NULL, &psc);
225 if (ACPI_FAILURE(status))
226 return -ENODEV;
227
228 result = psc;
229 }
230 /* The test below covers ACPI_STATE_UNKNOWN too. */
231 if (result <= ACPI_STATE_D2) {
232 ; /* Do nothing. */
233 } else if (device->power.flags.power_resources) {
234 int error = acpi_power_get_inferred_state(device, &result);
235 if (error)
236 return error;
237 } else if (result == ACPI_STATE_D3_HOT) {
238 result = ACPI_STATE_D3;
216 } 239 }
240 *state = result;
217 241
218 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", 242 out:
219 device->pnp.bus_id, *state)); 243 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
244 device->pnp.bus_id, state_string(*state)));
220 245
221 return 0; 246 return 0;
222} 247}
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
234 /* Make sure this is a valid target state */ 259 /* Make sure this is a valid target state */
235 260
236 if (state == device->power.state) { 261 if (state == device->power.state) {
237 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", 262 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
238 state)); 263 state_string(state)));
239 return 0; 264 return 0;
240 } 265 }
241 266
242 if (!device->power.states[state].flags.valid) { 267 if (!device->power.states[state].flags.valid) {
243 printk(KERN_WARNING PREFIX "Device does not support D%d\n", state); 268 printk(KERN_WARNING PREFIX "Device does not support %s\n",
269 state_string(state));
244 return -ENODEV; 270 return -ENODEV;
245 } 271 }
246 if (device->parent && (state < device->parent->power.state)) { 272 if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
294 end: 320 end:
295 if (result) 321 if (result)
296 printk(KERN_WARNING PREFIX 322 printk(KERN_WARNING PREFIX
297 "Device [%s] failed to transition to D%d\n", 323 "Device [%s] failed to transition to %s\n",
298 device->pnp.bus_id, state); 324 device->pnp.bus_id, state_string(state));
299 else { 325 else {
300 device->power.state = state; 326 device->power.state = state;
301 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 327 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
302 "Device [%s] transitioned to D%d\n", 328 "Device [%s] transitioned to %s\n",
303 device->pnp.bus_id, state)); 329 device->pnp.bus_id, state_string(state)));
304 } 330 }
305 331
306 return result; 332 return result;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0500f719f63e..dd6d6a3c6780 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
631 * We know a device's inferred power state when all the resources 631 * We know a device's inferred power state when all the resources
632 * required for a given D-state are 'on'. 632 * required for a given D-state are 'on'.
633 */ 633 */
634 for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) { 634 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
635 list = &device->power.states[i].resources; 635 list = &device->power.states[i].resources;
636 if (list->count < 1) 636 if (list->count < 1)
637 continue; 637 continue;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0af48a8554cd..a093dc163a42 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
333 struct acpi_buffer state = { 0, NULL }; 333 struct acpi_buffer state = { 0, NULL };
334 union acpi_object *pss = NULL; 334 union acpi_object *pss = NULL;
335 int i; 335 int i;
336 int last_invalid = -1;
336 337
337 338
338 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 339 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
@@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
394 ((u32)(px->core_frequency * 1000) != 395 ((u32)(px->core_frequency * 1000) !=
395 (px->core_frequency * 1000))) { 396 (px->core_frequency * 1000))) {
396 printk(KERN_ERR FW_BUG PREFIX 397 printk(KERN_ERR FW_BUG PREFIX
397 "Invalid BIOS _PSS frequency: 0x%llx MHz\n", 398 "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
398 px->core_frequency); 399 pr->id, px->core_frequency);
399 result = -EFAULT; 400 if (last_invalid == -1)
400 kfree(pr->performance->states); 401 last_invalid = i;
401 goto end; 402 } else {
403 if (last_invalid != -1) {
404 /*
405 * Copy this valid entry over last_invalid entry
406 */
407 memcpy(&(pr->performance->states[last_invalid]),
408 px, sizeof(struct acpi_processor_px));
409 ++last_invalid;
410 }
402 } 411 }
403 } 412 }
404 413
414 if (last_invalid == 0) {
415 printk(KERN_ERR FW_BUG PREFIX
416 "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
417 result = -EFAULT;
418 kfree(pr->performance->states);
419 pr->performance->states = NULL;
420 }
421
422 if (last_invalid > 0)
423 pr->performance->state_count = last_invalid;
424
405 end: 425 end:
406 kfree(buffer.pointer); 426 kfree(buffer.pointer);
407 427
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 85cbfdccc97c..c8a1f3b68110 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void)
1567 ACPI_BUS_TYPE_POWER_BUTTON, 1567 ACPI_BUS_TYPE_POWER_BUTTON,
1568 ACPI_STA_DEFAULT, 1568 ACPI_STA_DEFAULT,
1569 &ops); 1569 &ops);
1570 device_init_wakeup(&device->dev, true);
1570 } 1571 }
1571 1572
1572 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 1573 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ebaa04593236..88561029cca8 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -25,8 +25,6 @@
25#include <acpi/acpi_bus.h> 25#include <acpi/acpi_bus.h>
26#include <acpi/acpi_drivers.h> 26#include <acpi/acpi_drivers.h>
27 27
28#include <asm/realmode.h>
29
30#include "internal.h" 28#include "internal.h"
31#include "sleep.h" 29#include "sleep.h"
32 30
@@ -59,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
59MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); 57MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
60 58
61static u8 sleep_states[ACPI_S_STATE_COUNT]; 59static u8 sleep_states[ACPI_S_STATE_COUNT];
60static bool pwr_btn_event_pending;
62 61
63static void acpi_sleep_tts_switch(u32 acpi_state) 62static void acpi_sleep_tts_switch(u32 acpi_state)
64{ 63{
@@ -93,13 +92,11 @@ static struct notifier_block tts_notifier = {
93static int acpi_sleep_prepare(u32 acpi_state) 92static int acpi_sleep_prepare(u32 acpi_state)
94{ 93{
95#ifdef CONFIG_ACPI_SLEEP 94#ifdef CONFIG_ACPI_SLEEP
96 unsigned long wakeup_pa = real_mode_header->wakeup_start;
97 /* do we have a wakeup address for S2 and S3? */ 95 /* do we have a wakeup address for S2 and S3? */
98 if (acpi_state == ACPI_STATE_S3) { 96 if (acpi_state == ACPI_STATE_S3) {
99 if (!wakeup_pa) 97 if (!acpi_wakeup_address)
100 return -EFAULT; 98 return -EFAULT;
101 acpi_set_firmware_waking_vector( 99 acpi_set_firmware_waking_vector(acpi_wakeup_address);
102 (acpi_physical_address)wakeup_pa);
103 100
104 } 101 }
105 ACPI_FLUSH_CPU_CACHE(); 102 ACPI_FLUSH_CPU_CACHE();
@@ -188,6 +185,14 @@ static int acpi_pm_prepare(void)
188 return error; 185 return error;
189} 186}
190 187
188static int find_powerf_dev(struct device *dev, void *data)
189{
190 struct acpi_device *device = to_acpi_device(dev);
191 const char *hid = acpi_device_hid(device);
192
193 return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
194}
195
191/** 196/**
192 * acpi_pm_finish - Instruct the platform to leave a sleep state. 197 * acpi_pm_finish - Instruct the platform to leave a sleep state.
193 * 198 *
@@ -196,6 +201,7 @@ static int acpi_pm_prepare(void)
196 */ 201 */
197static void acpi_pm_finish(void) 202static void acpi_pm_finish(void)
198{ 203{
204 struct device *pwr_btn_dev;
199 u32 acpi_state = acpi_target_sleep_state; 205 u32 acpi_state = acpi_target_sleep_state;
200 206
201 acpi_ec_unblock_transactions(); 207 acpi_ec_unblock_transactions();
@@ -213,6 +219,23 @@ static void acpi_pm_finish(void)
213 acpi_set_firmware_waking_vector((acpi_physical_address) 0); 219 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
214 220
215 acpi_target_sleep_state = ACPI_STATE_S0; 221 acpi_target_sleep_state = ACPI_STATE_S0;
222
223 /* If we were woken with the fixed power button, provide a small
224 * hint to userspace in the form of a wakeup event on the fixed power
225 * button device (if it can be found).
226 *
227 * We delay the event generation til now, as the PM layer requires
228 * timekeeping to be running before we generate events. */
229 if (!pwr_btn_event_pending)
230 return;
231
232 pwr_btn_event_pending = false;
233 pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
234 find_powerf_dev);
235 if (pwr_btn_dev) {
236 pm_wakeup_event(pwr_btn_dev, 0);
237 put_device(pwr_btn_dev);
238 }
216} 239}
217 240
218/** 241/**
@@ -302,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
302 /* ACPI 3.0 specs (P62) says that it's the responsibility 325 /* ACPI 3.0 specs (P62) says that it's the responsibility
303 * of the OSPM to clear the status bit [ implying that the 326 * of the OSPM to clear the status bit [ implying that the
304 * POWER_BUTTON event should not reach userspace ] 327 * POWER_BUTTON event should not reach userspace ]
328 *
329 * However, we do generate a small hint for userspace in the form of
330 * a wakeup event. We flag this condition for now and generate the
331 * event later, as we're currently too early in resume to be able to
332 * generate wakeup events.
305 */ 333 */
306 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) 334 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
307 acpi_clear_event(ACPI_EVENT_POWER_BUTTON); 335 acpi_event_status pwr_btn_status;
336
337 acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
338
339 if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
340 acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
341 /* Flag for later */
342 pwr_btn_event_pending = true;
343 }
344 }
308 345
309 /* 346 /*
310 * Disable and clear GPE status before interrupt is enabled. Some GPEs 347 * Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -734,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
734 * can wake the system. _S0W may be valid, too. 771 * can wake the system. _S0W may be valid, too.
735 */ 772 */
736 if (acpi_target_sleep_state == ACPI_STATE_S0 || 773 if (acpi_target_sleep_state == ACPI_STATE_S0 ||
737 (device_may_wakeup(dev) && 774 (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
738 adev->wakeup.sleep_state <= acpi_target_sleep_state)) { 775 adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
739 acpi_status status; 776 acpi_status status;
740 777
741 acpi_method[3] = 'W'; 778 acpi_method[3] = 'W';
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9577b6fa2650..a576575617d7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1687,10 +1687,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
1687 set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); 1687 set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
1688 set_bit(KEY_DISPLAY_OFF, input->keybit); 1688 set_bit(KEY_DISPLAY_OFF, input->keybit);
1689 1689
1690 error = input_register_device(input);
1691 if (error)
1692 goto err_stop_video;
1693
1694 printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", 1690 printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
1695 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), 1691 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
1696 video->flags.multihead ? "yes" : "no", 1692 video->flags.multihead ? "yes" : "no",
@@ -1701,12 +1697,16 @@ static int acpi_video_bus_add(struct acpi_device *device)
1701 video->pm_nb.priority = 0; 1697 video->pm_nb.priority = 0;
1702 error = register_pm_notifier(&video->pm_nb); 1698 error = register_pm_notifier(&video->pm_nb);
1703 if (error) 1699 if (error)
1704 goto err_unregister_input_dev; 1700 goto err_stop_video;
1701
1702 error = input_register_device(input);
1703 if (error)
1704 goto err_unregister_pm_notifier;
1705 1705
1706 return 0; 1706 return 0;
1707 1707
1708 err_unregister_input_dev: 1708 err_unregister_pm_notifier:
1709 input_unregister_device(input); 1709 unregister_pm_notifier(&video->pm_nb);
1710 err_stop_video: 1710 err_stop_video:
1711 acpi_video_bus_stop_devices(video); 1711 acpi_video_bus_stop_devices(video);
1712 err_free_input_dev: 1712 err_free_input_dev:
@@ -1743,9 +1743,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
1743 return 0; 1743 return 0;
1744} 1744}
1745 1745
1746static int __init is_i740(struct pci_dev *dev)
1747{
1748 if (dev->device == 0x00D1)
1749 return 1;
1750 if (dev->device == 0x7000)
1751 return 1;
1752 return 0;
1753}
1754
1746static int __init intel_opregion_present(void) 1755static int __init intel_opregion_present(void)
1747{ 1756{
1748#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE) 1757 int opregion = 0;
1749 struct pci_dev *dev = NULL; 1758 struct pci_dev *dev = NULL;
1750 u32 address; 1759 u32 address;
1751 1760
@@ -1754,13 +1763,15 @@ static int __init intel_opregion_present(void)
1754 continue; 1763 continue;
1755 if (dev->vendor != PCI_VENDOR_ID_INTEL) 1764 if (dev->vendor != PCI_VENDOR_ID_INTEL)
1756 continue; 1765 continue;
1766 /* We don't want to poke around undefined i740 registers */
1767 if (is_i740(dev))
1768 continue;
1757 pci_read_config_dword(dev, 0xfc, &address); 1769 pci_read_config_dword(dev, 0xfc, &address);
1758 if (!address) 1770 if (!address)
1759 continue; 1771 continue;
1760 return 1; 1772 opregion = 1;
1761 } 1773 }
1762#endif 1774 return opregion;
1763 return 0;
1764} 1775}
1765 1776
1766int acpi_video_register(void) 1777int acpi_video_register(void)
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 3239517f4d90..ac6a5beb28f3 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller source file 4 * Arasan Compact Flash host controller source file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = {
959 959
960module_platform_driver(arasan_cf_driver); 960module_platform_driver(arasan_cf_driver);
961 961
962MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 962MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
963MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); 963MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
964MODULE_LICENSE("GPL"); 964MODULE_LICENSE("GPL");
965MODULE_ALIAS("platform:" DRIVER_NAME); 965MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e8cd652d2017..98510931c815 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
984 } else if (skb && card->using_dma) { 984 } else if (skb && card->using_dma) {
985 SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data, 985 SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
986 skb->len, PCI_DMA_TODEVICE); 986 skb->len, PCI_DMA_TODEVICE);
987 card->tx_skb[port] = skb;
987 iowrite32(SKB_CB(skb)->dma_addr, 988 iowrite32(SKB_CB(skb)->dma_addr,
988 card->config_regs + TX_DMA_ADDR(port)); 989 card->config_regs + TX_DMA_ADDR(port));
989 } 990 }
@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1152 db_fpga_upgrade = db_firmware_upgrade = 0; 1153 db_fpga_upgrade = db_firmware_upgrade = 0;
1153 } 1154 }
1154 1155
1155 if (card->fpga_version >= DMA_SUPPORTED){ 1156 if (card->fpga_version >= DMA_SUPPORTED) {
1157 pci_set_master(dev);
1156 card->using_dma = 1; 1158 card->using_dma = 1;
1157 } else { 1159 } else {
1158 card->using_dma = 0; 1160 card->using_dma = 0;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1b1cbb571d38..dcb8a6e48692 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -100,7 +100,7 @@ static void driver_deferred_probe_add(struct device *dev)
100 mutex_lock(&deferred_probe_mutex); 100 mutex_lock(&deferred_probe_mutex);
101 if (list_empty(&dev->p->deferred_probe)) { 101 if (list_empty(&dev->p->deferred_probe)) {
102 dev_dbg(dev, "Added to deferred list\n"); 102 dev_dbg(dev, "Added to deferred list\n");
103 list_add(&dev->p->deferred_probe, &deferred_probe_pending_list); 103 list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
104 } 104 }
105 mutex_unlock(&deferred_probe_mutex); 105 mutex_unlock(&deferred_probe_mutex);
106} 106}
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0bcda488f11c..c89aa01fb1de 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev,
246 map->lock = regmap_lock_mutex; 246 map->lock = regmap_lock_mutex;
247 map->unlock = regmap_unlock_mutex; 247 map->unlock = regmap_unlock_mutex;
248 } 248 }
249 map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
250 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 249 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
251 map->format.pad_bytes = config->pad_bits / 8; 250 map->format.pad_bytes = config->pad_bits / 8;
252 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 251 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
253 map->format.buf_size += map->format.pad_bytes; 252 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
253 config->val_bits + config->pad_bits, 8);
254 map->reg_shift = config->pad_bits % 8; 254 map->reg_shift = config->pad_bits % 8;
255 if (config->reg_stride) 255 if (config->reg_stride)
256 map->reg_stride = config->reg_stride; 256 map->reg_stride = config->reg_stride;
@@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev,
368 368
369 ret = regcache_init(map, config); 369 ret = regcache_init(map, config);
370 if (ret < 0) 370 if (ret < 0)
371 goto err_free_workbuf; 371 goto err_debugfs;
372 372
373 /* Add a devres resource for dev_get_regmap() */ 373 /* Add a devres resource for dev_get_regmap() */
374 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 374 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev,
383 383
384err_cache: 384err_cache:
385 regcache_exit(map); 385 regcache_exit(map);
386err_free_workbuf: 386err_debugfs:
387 regmap_debugfs_exit(map);
387 kfree(map->work_buf); 388 kfree(map->work_buf);
388err_map: 389err_map:
389 kfree(map); 390 kfree(map);
@@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
471 472
472 return ret; 473 return ret;
473} 474}
475EXPORT_SYMBOL_GPL(regmap_reinit_cache);
474 476
475/** 477/**
476 * regmap_exit(): Free a previously allocated register map 478 * regmap_exit(): Free a previously allocated register map
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index ba29b2e73d48..72b5e7280d14 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -42,7 +42,7 @@ struct device *soc_device_to_device(struct soc_device *soc_dev)
42 return &soc_dev->dev; 42 return &soc_dev->dev;
43} 43}
44 44
45static mode_t soc_attribute_mode(struct kobject *kobj, 45static umode_t soc_attribute_mode(struct kobject *kobj,
46 struct attribute *attr, 46 struct attribute *attr,
47 int index) 47 int index)
48{ 48{
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index a058842f14fd..61ce4054b3c3 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
139 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); 139 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
140 break; 140 break;
141 case 0x4331: 141 case 0x4331:
142 /* BCM4331 workaround is SPROM-related, we put it in sprom.c */ 142 case 43431:
143 /* Ext PA lines must be enabled for tx on BCM4331 */
144 bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
143 break; 145 break;
144 case 43224: 146 case 43224:
145 if (bus->chipinfo.rev == 0) { 147 if (bus->chipinfo.rev == 0) {
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 9a96f14c8f47..c32ebd537abe 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
232int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, 232int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
233 bool enable) 233 bool enable)
234{ 234{
235 struct pci_dev *pdev = pc->core->bus->host_pci; 235 struct pci_dev *pdev;
236 u32 coremask, tmp; 236 u32 coremask, tmp;
237 int err = 0; 237 int err = 0;
238 238
239 if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) { 239 if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
240 /* This bcma device is not on a PCI host-bus. So the IRQs are 240 /* This bcma device is not on a PCI host-bus. So the IRQs are
241 * not routed through the PCI core. 241 * not routed through the PCI core.
242 * So we must not enable routing through the PCI core. */ 242 * So we must not enable routing through the PCI core. */
243 goto out; 243 goto out;
244 } 244 }
245 245
246 pdev = pc->core->bus->host_pci;
247
246 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); 248 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
247 if (err) 249 if (err)
248 goto out; 250 goto out;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index c7f93359acb0..f16f42d36071 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus)
579 if (!sprom) 579 if (!sprom)
580 return -ENOMEM; 580 return -ENOMEM;
581 581
582 if (bus->chipinfo.id == 0x4331) 582 if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
583 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); 583 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
584 584
585 pr_debug("SPROM offset 0x%x\n", offset); 585 pr_debug("SPROM offset 0x%x\n", offset);
586 bcma_sprom_read(bus, offset, sprom); 586 bcma_sprom_read(bus, offset, sprom);
587 587
588 if (bus->chipinfo.id == 0x4331) 588 if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
589 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); 589 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
590 590
591 err = bcma_sprom_valid(sprom); 591 err = bcma_sprom_valid(sprom);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 304000c3d433..264bc77dcb91 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -294,18 +294,16 @@ static int hba_reset_nosleep(struct driver_data *dd)
294 */ 294 */
295static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag) 295static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
296{ 296{
297 unsigned long flags = 0;
298
299 atomic_set(&port->commands[tag].active, 1); 297 atomic_set(&port->commands[tag].active, 1);
300 298
301 spin_lock_irqsave(&port->cmd_issue_lock, flags); 299 spin_lock(&port->cmd_issue_lock);
302 300
303 writel((1 << MTIP_TAG_BIT(tag)), 301 writel((1 << MTIP_TAG_BIT(tag)),
304 port->s_active[MTIP_TAG_INDEX(tag)]); 302 port->s_active[MTIP_TAG_INDEX(tag)]);
305 writel((1 << MTIP_TAG_BIT(tag)), 303 writel((1 << MTIP_TAG_BIT(tag)),
306 port->cmd_issue[MTIP_TAG_INDEX(tag)]); 304 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
307 305
308 spin_unlock_irqrestore(&port->cmd_issue_lock, flags); 306 spin_unlock(&port->cmd_issue_lock);
309 307
310 /* Set the command's timeout value.*/ 308 /* Set the command's timeout value.*/
311 port->commands[tag].comp_time = jiffies + msecs_to_jiffies( 309 port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -436,8 +434,7 @@ static void mtip_init_port(struct mtip_port *port)
436 writel(0xFFFFFFFF, port->completed[i]); 434 writel(0xFFFFFFFF, port->completed[i]);
437 435
438 /* Clear any pending interrupts for this port */ 436 /* Clear any pending interrupts for this port */
439 writel(readl(port->dd->mmio + PORT_IRQ_STAT), 437 writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
440 port->dd->mmio + PORT_IRQ_STAT);
441 438
442 /* Clear any pending interrupts on the HBA. */ 439 /* Clear any pending interrupts on the HBA. */
443 writel(readl(port->dd->mmio + HOST_IRQ_STAT), 440 writel(readl(port->dd->mmio + HOST_IRQ_STAT),
@@ -782,13 +779,24 @@ static void mtip_handle_tfe(struct driver_data *dd)
782 779
783 /* Stop the timer to prevent command timeouts. */ 780 /* Stop the timer to prevent command timeouts. */
784 del_timer(&port->cmd_timer); 781 del_timer(&port->cmd_timer);
782 set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
783
784 if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
785 test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
786 cmd = &port->commands[MTIP_TAG_INTERNAL];
787 dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
788
789 atomic_inc(&cmd->active); /* active > 1 indicates error */
790 if (cmd->comp_data && cmd->comp_func) {
791 cmd->comp_func(port, MTIP_TAG_INTERNAL,
792 cmd->comp_data, PORT_IRQ_TF_ERR);
793 }
794 goto handle_tfe_exit;
795 }
785 796
786 /* clear the tag accumulator */ 797 /* clear the tag accumulator */
787 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); 798 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
788 799
789 /* Set eh_active */
790 set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
791
792 /* Loop through all the groups */ 800 /* Loop through all the groups */
793 for (group = 0; group < dd->slot_groups; group++) { 801 for (group = 0; group < dd->slot_groups; group++) {
794 completed = readl(port->completed[group]); 802 completed = readl(port->completed[group]);
@@ -940,6 +948,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
940 } 948 }
941 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt); 949 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
942 950
951handle_tfe_exit:
943 /* clear eh_active */ 952 /* clear eh_active */
944 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); 953 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
945 wake_up_interruptible(&port->svc_wait); 954 wake_up_interruptible(&port->svc_wait);
@@ -961,6 +970,8 @@ static inline void mtip_process_sdbf(struct driver_data *dd)
961 /* walk all bits in all slot groups */ 970 /* walk all bits in all slot groups */
962 for (group = 0; group < dd->slot_groups; group++) { 971 for (group = 0; group < dd->slot_groups; group++) {
963 completed = readl(port->completed[group]); 972 completed = readl(port->completed[group]);
973 if (!completed)
974 continue;
964 975
965 /* clear completed status register in the hardware.*/ 976 /* clear completed status register in the hardware.*/
966 writel(completed, port->completed[group]); 977 writel(completed, port->completed[group]);
@@ -1329,22 +1340,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1329 } 1340 }
1330 rv = -EAGAIN; 1341 rv = -EAGAIN;
1331 } 1342 }
1332
1333 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1334 & (1 << MTIP_TAG_INTERNAL)) {
1335 dev_warn(&port->dd->pdev->dev,
1336 "Retiring internal command but CI is 1.\n");
1337 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1338 &port->dd->dd_flag)) {
1339 hba_reset_nosleep(port->dd);
1340 rv = -ENXIO;
1341 } else {
1342 mtip_restart_port(port);
1343 rv = -EAGAIN;
1344 }
1345 goto exec_ic_exit;
1346 }
1347
1348 } else { 1343 } else {
1349 /* Spin for <timeout> checking if command still outstanding */ 1344 /* Spin for <timeout> checking if command still outstanding */
1350 timeout = jiffies + msecs_to_jiffies(timeout); 1345 timeout = jiffies + msecs_to_jiffies(timeout);
@@ -1361,21 +1356,25 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1361 rv = -ENXIO; 1356 rv = -ENXIO;
1362 goto exec_ic_exit; 1357 goto exec_ic_exit;
1363 } 1358 }
1359 if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
1360 atomic_inc(&int_cmd->active); /* error */
1361 break;
1362 }
1364 } 1363 }
1364 }
1365 1365
1366 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1366 if (atomic_read(&int_cmd->active) > 1) {
1367 dev_err(&port->dd->pdev->dev,
1368 "Internal command [%02X] failed\n", fis->command);
1369 rv = -EIO;
1370 }
1371 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1367 & (1 << MTIP_TAG_INTERNAL)) { 1372 & (1 << MTIP_TAG_INTERNAL)) {
1368 dev_err(&port->dd->pdev->dev, 1373 rv = -ENXIO;
1369 "Internal command did not complete [atomic]\n"); 1374 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1375 &port->dd->dd_flag)) {
1376 mtip_restart_port(port);
1370 rv = -EAGAIN; 1377 rv = -EAGAIN;
1371 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1372 &port->dd->dd_flag)) {
1373 hba_reset_nosleep(port->dd);
1374 rv = -ENXIO;
1375 } else {
1376 mtip_restart_port(port);
1377 rv = -EAGAIN;
1378 }
1379 } 1378 }
1380 } 1379 }
1381exec_ic_exit: 1380exec_ic_exit:
@@ -1893,13 +1892,33 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
1893 void __user *user_buffer) 1892 void __user *user_buffer)
1894{ 1893{
1895 struct host_to_dev_fis fis; 1894 struct host_to_dev_fis fis;
1896 struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG); 1895 struct host_to_dev_fis *reply;
1896 u8 *buf = NULL;
1897 dma_addr_t dma_addr = 0;
1898 int rv = 0, xfer_sz = command[3];
1899
1900 if (xfer_sz) {
1901 if (user_buffer)
1902 return -EFAULT;
1903
1904 buf = dmam_alloc_coherent(&port->dd->pdev->dev,
1905 ATA_SECT_SIZE * xfer_sz,
1906 &dma_addr,
1907 GFP_KERNEL);
1908 if (!buf) {
1909 dev_err(&port->dd->pdev->dev,
1910 "Memory allocation failed (%d bytes)\n",
1911 ATA_SECT_SIZE * xfer_sz);
1912 return -ENOMEM;
1913 }
1914 memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
1915 }
1897 1916
1898 /* Build the FIS. */ 1917 /* Build the FIS. */
1899 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 1918 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1900 fis.type = 0x27; 1919 fis.type = 0x27;
1901 fis.opts = 1 << 7; 1920 fis.opts = 1 << 7;
1902 fis.command = command[0]; 1921 fis.command = command[0];
1903 fis.features = command[2]; 1922 fis.features = command[2];
1904 fis.sect_count = command[3]; 1923 fis.sect_count = command[3];
1905 if (fis.command == ATA_CMD_SMART) { 1924 if (fis.command == ATA_CMD_SMART) {
@@ -1908,6 +1927,11 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
1908 fis.cyl_hi = 0xC2; 1927 fis.cyl_hi = 0xC2;
1909 } 1928 }
1910 1929
1930 if (xfer_sz)
1931 reply = (port->rxfis + RX_FIS_PIO_SETUP);
1932 else
1933 reply = (port->rxfis + RX_FIS_D2H_REG);
1934
1911 dbg_printk(MTIP_DRV_NAME 1935 dbg_printk(MTIP_DRV_NAME
1912 " %s: User Command: cmd %x, sect %x, " 1936 " %s: User Command: cmd %x, sect %x, "
1913 "feat %x, sectcnt %x\n", 1937 "feat %x, sectcnt %x\n",
@@ -1917,43 +1941,46 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
1917 command[2], 1941 command[2],
1918 command[3]); 1942 command[3]);
1919 1943
1920 memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
1921
1922 /* Execute the command. */ 1944 /* Execute the command. */
1923 if (mtip_exec_internal_command(port, 1945 if (mtip_exec_internal_command(port,
1924 &fis, 1946 &fis,
1925 5, 1947 5,
1926 port->sector_buffer_dma, 1948 (xfer_sz ? dma_addr : 0),
1927 (command[3] != 0) ? ATA_SECT_SIZE : 0, 1949 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
1928 0, 1950 0,
1929 GFP_KERNEL, 1951 GFP_KERNEL,
1930 MTIP_IOCTL_COMMAND_TIMEOUT_MS) 1952 MTIP_IOCTL_COMMAND_TIMEOUT_MS)
1931 < 0) { 1953 < 0) {
1932 return -1; 1954 rv = -EFAULT;
1955 goto exit_drive_command;
1933 } 1956 }
1934 1957
1935 /* Collect the completion status. */ 1958 /* Collect the completion status. */
1936 command[0] = reply->command; /* Status*/ 1959 command[0] = reply->command; /* Status*/
1937 command[1] = reply->features; /* Error*/ 1960 command[1] = reply->features; /* Error*/
1938 command[2] = command[3]; 1961 command[2] = reply->sect_count;
1939 1962
1940 dbg_printk(MTIP_DRV_NAME 1963 dbg_printk(MTIP_DRV_NAME
1941 " %s: Completion Status: stat %x, " 1964 " %s: Completion Status: stat %x, "
1942 "err %x, cmd %x\n", 1965 "err %x, nsect %x\n",
1943 __func__, 1966 __func__,
1944 command[0], 1967 command[0],
1945 command[1], 1968 command[1],
1946 command[2]); 1969 command[2]);
1947 1970
1948 if (user_buffer && command[3]) { 1971 if (xfer_sz) {
1949 if (copy_to_user(user_buffer, 1972 if (copy_to_user(user_buffer,
1950 port->sector_buffer, 1973 buf,
1951 ATA_SECT_SIZE * command[3])) { 1974 ATA_SECT_SIZE * command[3])) {
1952 return -EFAULT; 1975 rv = -EFAULT;
1976 goto exit_drive_command;
1953 } 1977 }
1954 } 1978 }
1955 1979exit_drive_command:
1956 return 0; 1980 if (buf)
1981 dmam_free_coherent(&port->dd->pdev->dev,
1982 ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
1983 return rv;
1957} 1984}
1958 1985
1959/* 1986/*
@@ -2003,6 +2030,32 @@ static unsigned int implicit_sector(unsigned char command,
2003 return rv; 2030 return rv;
2004} 2031}
2005 2032
2033static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
2034{
2035 switch (fis->command) {
2036 case ATA_CMD_DOWNLOAD_MICRO:
2037 *timeout = 120000; /* 2 minutes */
2038 break;
2039 case ATA_CMD_SEC_ERASE_UNIT:
2040 case 0xFC:
2041 *timeout = 240000; /* 4 minutes */
2042 break;
2043 case ATA_CMD_STANDBYNOW1:
2044 *timeout = 10000; /* 10 seconds */
2045 break;
2046 case 0xF7:
2047 case 0xFA:
2048 *timeout = 60000; /* 60 seconds */
2049 break;
2050 case ATA_CMD_SMART:
2051 *timeout = 15000; /* 15 seconds */
2052 break;
2053 default:
2054 *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
2055 break;
2056 }
2057}
2058
2006/* 2059/*
2007 * Executes a taskfile 2060 * Executes a taskfile
2008 * See ide_taskfile_ioctl() for derivation 2061 * See ide_taskfile_ioctl() for derivation
@@ -2023,7 +2076,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
2023 unsigned int taskin = 0; 2076 unsigned int taskin = 0;
2024 unsigned int taskout = 0; 2077 unsigned int taskout = 0;
2025 u8 nsect = 0; 2078 u8 nsect = 0;
2026 unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; 2079 unsigned int timeout;
2027 unsigned int force_single_sector; 2080 unsigned int force_single_sector;
2028 unsigned int transfer_size; 2081 unsigned int transfer_size;
2029 unsigned long task_file_data; 2082 unsigned long task_file_data;
@@ -2153,32 +2206,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
2153 fis.lba_hi, 2206 fis.lba_hi,
2154 fis.device); 2207 fis.device);
2155 2208
2156 switch (fis.command) { 2209 mtip_set_timeout(&fis, &timeout);
2157 case ATA_CMD_DOWNLOAD_MICRO:
2158 /* Change timeout for Download Microcode to 2 minutes */
2159 timeout = 120000;
2160 break;
2161 case ATA_CMD_SEC_ERASE_UNIT:
2162 /* Change timeout for Security Erase Unit to 4 minutes.*/
2163 timeout = 240000;
2164 break;
2165 case ATA_CMD_STANDBYNOW1:
2166 /* Change timeout for standby immediate to 10 seconds.*/
2167 timeout = 10000;
2168 break;
2169 case 0xF7:
2170 case 0xFA:
2171 /* Change timeout for vendor unique command to 10 secs */
2172 timeout = 10000;
2173 break;
2174 case ATA_CMD_SMART:
2175 /* Change timeout for vendor unique command to 15 secs */
2176 timeout = 15000;
2177 break;
2178 default:
2179 timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
2180 break;
2181 }
2182 2210
2183 /* Determine the correct transfer size.*/ 2211 /* Determine the correct transfer size.*/
2184 if (force_single_sector) 2212 if (force_single_sector)
@@ -2295,13 +2323,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
2295{ 2323{
2296 switch (cmd) { 2324 switch (cmd) {
2297 case HDIO_GET_IDENTITY: 2325 case HDIO_GET_IDENTITY:
2298 if (mtip_get_identify(dd->port, (void __user *) arg) < 0) { 2326 {
2299 dev_warn(&dd->pdev->dev, 2327 if (copy_to_user((void __user *)arg, dd->port->identify,
2300 "Unable to read identity\n"); 2328 sizeof(u16) * ATA_ID_WORDS))
2301 return -EIO; 2329 return -EFAULT;
2302 }
2303
2304 break; 2330 break;
2331 }
2305 case HDIO_DRIVE_CMD: 2332 case HDIO_DRIVE_CMD:
2306 { 2333 {
2307 u8 drive_command[4]; 2334 u8 drive_command[4];
@@ -2537,40 +2564,58 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
2537 int size = 0; 2564 int size = 0;
2538 int n; 2565 int n;
2539 2566
2540 size += sprintf(&buf[size], "S ACTive:\n"); 2567 size += sprintf(&buf[size], "Hardware\n--------\n");
2568 size += sprintf(&buf[size], "S ACTive : [ 0x");
2541 2569
2542 for (n = 0; n < dd->slot_groups; n++) 2570 for (n = dd->slot_groups-1; n >= 0; n--)
2543 size += sprintf(&buf[size], "0x%08x\n", 2571 size += sprintf(&buf[size], "%08X ",
2544 readl(dd->port->s_active[n])); 2572 readl(dd->port->s_active[n]));
2545 2573
2546 size += sprintf(&buf[size], "Command Issue:\n"); 2574 size += sprintf(&buf[size], "]\n");
2575 size += sprintf(&buf[size], "Command Issue : [ 0x");
2547 2576
2548 for (n = 0; n < dd->slot_groups; n++) 2577 for (n = dd->slot_groups-1; n >= 0; n--)
2549 size += sprintf(&buf[size], "0x%08x\n", 2578 size += sprintf(&buf[size], "%08X ",
2550 readl(dd->port->cmd_issue[n])); 2579 readl(dd->port->cmd_issue[n]));
2551 2580
2552 size += sprintf(&buf[size], "Allocated:\n"); 2581 size += sprintf(&buf[size], "]\n");
2582 size += sprintf(&buf[size], "Completed : [ 0x");
2583
2584 for (n = dd->slot_groups-1; n >= 0; n--)
2585 size += sprintf(&buf[size], "%08X ",
2586 readl(dd->port->completed[n]));
2587
2588 size += sprintf(&buf[size], "]\n");
2589 size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
2590 readl(dd->port->mmio + PORT_IRQ_STAT));
2591 size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
2592 readl(dd->mmio + HOST_IRQ_STAT));
2593 size += sprintf(&buf[size], "\n");
2553 2594
2554 for (n = 0; n < dd->slot_groups; n++) { 2595 size += sprintf(&buf[size], "Local\n-----\n");
2596 size += sprintf(&buf[size], "Allocated : [ 0x");
2597
2598 for (n = dd->slot_groups-1; n >= 0; n--) {
2555 if (sizeof(long) > sizeof(u32)) 2599 if (sizeof(long) > sizeof(u32))
2556 group_allocated = 2600 group_allocated =
2557 dd->port->allocated[n/2] >> (32*(n&1)); 2601 dd->port->allocated[n/2] >> (32*(n&1));
2558 else 2602 else
2559 group_allocated = dd->port->allocated[n]; 2603 group_allocated = dd->port->allocated[n];
2560 size += sprintf(&buf[size], "0x%08x\n", 2604 size += sprintf(&buf[size], "%08X ", group_allocated);
2561 group_allocated);
2562 } 2605 }
2606 size += sprintf(&buf[size], "]\n");
2563 2607
2564 size += sprintf(&buf[size], "Completed:\n"); 2608 size += sprintf(&buf[size], "Commands in Q: [ 0x");
2565
2566 for (n = 0; n < dd->slot_groups; n++)
2567 size += sprintf(&buf[size], "0x%08x\n",
2568 readl(dd->port->completed[n]));
2569 2609
2570 size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n", 2610 for (n = dd->slot_groups-1; n >= 0; n--) {
2571 readl(dd->port->mmio + PORT_IRQ_STAT)); 2611 if (sizeof(long) > sizeof(u32))
2572 size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n", 2612 group_allocated =
2573 readl(dd->mmio + HOST_IRQ_STAT)); 2613 dd->port->cmds_to_issue[n/2] >> (32*(n&1));
2614 else
2615 group_allocated = dd->port->cmds_to_issue[n];
2616 size += sprintf(&buf[size], "%08X ", group_allocated);
2617 }
2618 size += sprintf(&buf[size], "]\n");
2574 2619
2575 return size; 2620 return size;
2576} 2621}
@@ -2592,8 +2637,24 @@ static ssize_t mtip_hw_show_status(struct device *dev,
2592 return size; 2637 return size;
2593} 2638}
2594 2639
2640static ssize_t mtip_hw_show_flags(struct device *dev,
2641 struct device_attribute *attr,
2642 char *buf)
2643{
2644 struct driver_data *dd = dev_to_disk(dev)->private_data;
2645 int size = 0;
2646
2647 size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
2648 dd->port->flags);
2649 size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n",
2650 dd->dd_flag);
2651
2652 return size;
2653}
2654
2595static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL); 2655static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
2596static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); 2656static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2657static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
2597 2658
2598/* 2659/*
2599 * Create the sysfs related attributes. 2660 * Create the sysfs related attributes.
@@ -2616,6 +2677,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2616 if (sysfs_create_file(kobj, &dev_attr_status.attr)) 2677 if (sysfs_create_file(kobj, &dev_attr_status.attr))
2617 dev_warn(&dd->pdev->dev, 2678 dev_warn(&dd->pdev->dev,
2618 "Error creating 'status' sysfs entry\n"); 2679 "Error creating 'status' sysfs entry\n");
2680 if (sysfs_create_file(kobj, &dev_attr_flags.attr))
2681 dev_warn(&dd->pdev->dev,
2682 "Error creating 'flags' sysfs entry\n");
2619 return 0; 2683 return 0;
2620} 2684}
2621 2685
@@ -2636,6 +2700,7 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2636 2700
2637 sysfs_remove_file(kobj, &dev_attr_registers.attr); 2701 sysfs_remove_file(kobj, &dev_attr_registers.attr);
2638 sysfs_remove_file(kobj, &dev_attr_status.attr); 2702 sysfs_remove_file(kobj, &dev_attr_status.attr);
2703 sysfs_remove_file(kobj, &dev_attr_flags.attr);
2639 2704
2640 return 0; 2705 return 0;
2641} 2706}
@@ -3634,7 +3699,10 @@ skip_create_disk:
3634 set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags); 3699 set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
3635 blk_queue_max_segments(dd->queue, MTIP_MAX_SG); 3700 blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
3636 blk_queue_physical_block_size(dd->queue, 4096); 3701 blk_queue_physical_block_size(dd->queue, 4096);
3702 blk_queue_max_hw_sectors(dd->queue, 0xffff);
3703 blk_queue_max_segment_size(dd->queue, 0x400000);
3637 blk_queue_io_min(dd->queue, 4096); 3704 blk_queue_io_min(dd->queue, 4096);
3705
3638 /* 3706 /*
3639 * write back cache is not supported in the device. FUA depends on 3707 * write back cache is not supported in the device. FUA depends on
3640 * write back cache support, hence setting flush support to zero. 3708 * write back cache support, hence setting flush support to zero.
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4ef58336310a..b2c88da26b2a 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -113,33 +113,35 @@
113 113
114#define __force_bit2int (unsigned int __force) 114#define __force_bit2int (unsigned int __force)
115 115
116/* below are bit numbers in 'flags' defined in mtip_port */ 116enum {
117#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */ 117 /* below are bit numbers in 'flags' defined in mtip_port */
118#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */ 118 MTIP_PF_IC_ACTIVE_BIT = 0, /* pio/ioctl */
119#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */ 119 MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
120#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcde */ 120 MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
121#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ 121 MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
122 MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
122 (1 << MTIP_PF_EH_ACTIVE_BIT) | \ 123 (1 << MTIP_PF_EH_ACTIVE_BIT) | \
123 (1 << MTIP_PF_SE_ACTIVE_BIT) | \ 124 (1 << MTIP_PF_SE_ACTIVE_BIT) | \
124 (1 << MTIP_PF_DM_ACTIVE_BIT)) 125 (1 << MTIP_PF_DM_ACTIVE_BIT)),
125 126
126#define MTIP_PF_SVC_THD_ACTIVE_BIT 4 127 MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
127#define MTIP_PF_ISSUE_CMDS_BIT 5 128 MTIP_PF_ISSUE_CMDS_BIT = 5,
128#define MTIP_PF_REBUILD_BIT 6 129 MTIP_PF_REBUILD_BIT = 6,
129#define MTIP_PF_SVC_THD_STOP_BIT 8 130 MTIP_PF_SVC_THD_STOP_BIT = 8,
130 131
131/* below are bit numbers in 'dd_flag' defined in driver_data */ 132 /* below are bit numbers in 'dd_flag' defined in driver_data */
132#define MTIP_DDF_REMOVE_PENDING_BIT 1 133 MTIP_DDF_REMOVE_PENDING_BIT = 1,
133#define MTIP_DDF_OVER_TEMP_BIT 2 134 MTIP_DDF_OVER_TEMP_BIT = 2,
134#define MTIP_DDF_WRITE_PROTECT_BIT 3 135 MTIP_DDF_WRITE_PROTECT_BIT = 3,
135#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ 136 MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
136 (1 << MTIP_DDF_OVER_TEMP_BIT) | \ 137 (1 << MTIP_DDF_OVER_TEMP_BIT) | \
137 (1 << MTIP_DDF_WRITE_PROTECT_BIT)) 138 (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
138 139
139#define MTIP_DDF_CLEANUP_BIT 5 140 MTIP_DDF_CLEANUP_BIT = 5,
140#define MTIP_DDF_RESUME_BIT 6 141 MTIP_DDF_RESUME_BIT = 6,
141#define MTIP_DDF_INIT_DONE_BIT 7 142 MTIP_DDF_INIT_DONE_BIT = 7,
142#define MTIP_DDF_REBUILD_FAILED_BIT 8 143 MTIP_DDF_REBUILD_FAILED_BIT = 8,
144};
143 145
144__packed struct smart_attr{ 146__packed struct smart_attr{
145 u8 attr_id; 147 u8 attr_id;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index c98c5689bb0b..92622d44e12d 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -899,6 +899,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
899 ID(PCI_DEVICE_ID_INTEL_B43_HB), 899 ID(PCI_DEVICE_ID_INTEL_B43_HB),
900 ID(PCI_DEVICE_ID_INTEL_B43_1_HB), 900 ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
901 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), 901 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
902 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
902 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), 903 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
903 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), 904 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
904 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), 905 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index cf2e764b1760..57226424690c 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -212,6 +212,7 @@
212#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 212#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
213#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 213#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
214#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 214#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
215#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
215#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 216#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
216#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 217#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
217#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 218#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index f518b99f53f5..731c9046cf7b 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
34 u32 *data = buf; 34 u32 *data = buf;
35 35
36 /* data ready? */ 36 /* data ready? */
37 if (readl(trng->base + TRNG_ODATA) & 1) { 37 if (readl(trng->base + TRNG_ISR) & 1) {
38 *data = readl(trng->base + TRNG_ODATA); 38 *data = readl(trng->base + TRNG_ODATA);
39 /*
40 ensure data ready is only set again AFTER the next data
41 word is ready in case it got set between checking ISR
42 and reading ODATA, so we don't risk re-reading the
43 same word
44 */
45 readl(trng->base + TRNG_ISR);
39 return 4; 46 return 4;
40 } else 47 } else
41 return 0; 48 return 0;
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index af34074e702b..6756e7c3bc07 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 4dbdb3fe18e0..958aa3ad1d60 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index b471c9762a97..1afc18c4effc 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index dcd4bdf4b0d9..5f1b6badeb15 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 376d4e5ff326..7cd63788d546 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 3321c46a071c..931737677dfa 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
2 * Clock framework definitions for SPEAr platform 2 * Clock framework definitions for SPEAr platform
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 42b68df9aeef..8f05652d53e6 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1310 machine clock framework source file 4 * SPEAr1310 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index f130919d5bf8..e3ea72162236 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1340 machine clock framework source file 4 * SPEAr1340 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 440bb3e4c971..01dd6daff2a1 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr3xx machines clock framework source file 2 * SPEAr3xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index f9a20b382304..554d64b062a1 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr6xx machines clock framework source file 2 * SPEAr6xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d32653..dd3e661a124d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
6obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o 6obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
7obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o 7obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
8obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o 8obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
9obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
9obj-$(CONFIG_CLKBLD_I8253) += i8253.o 10obj-$(CONFIG_CLKBLD_I8253) += i8253.o
10obj-$(CONFIG_CLKSRC_MMIO) += mmio.o 11obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
11obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o 12obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644
index 000000000000..372051d1bba8
--- /dev/null
+++ b/drivers/clocksource/em_sti.c
@@ -0,0 +1,406 @@
1/*
2 * Emma Mobile Timer Support - STI
3 *
4 * Copyright (C) 2012 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/io.h>
26#include <linux/clk.h>
27#include <linux/irq.h>
28#include <linux/err.h>
29#include <linux/delay.h>
30#include <linux/clocksource.h>
31#include <linux/clockchips.h>
32#include <linux/slab.h>
33#include <linux/module.h>
34
35enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
36
37struct em_sti_priv {
38 void __iomem *base;
39 struct clk *clk;
40 struct platform_device *pdev;
41 unsigned int active[USER_NR];
42 unsigned long rate;
43 raw_spinlock_t lock;
44 struct clock_event_device ced;
45 struct clocksource cs;
46};
47
48#define STI_CONTROL 0x00
49#define STI_COMPA_H 0x10
50#define STI_COMPA_L 0x14
51#define STI_COMPB_H 0x18
52#define STI_COMPB_L 0x1c
53#define STI_COUNT_H 0x20
54#define STI_COUNT_L 0x24
55#define STI_COUNT_RAW_H 0x28
56#define STI_COUNT_RAW_L 0x2c
57#define STI_SET_H 0x30
58#define STI_SET_L 0x34
59#define STI_INTSTATUS 0x40
60#define STI_INTRAWSTATUS 0x44
61#define STI_INTENSET 0x48
62#define STI_INTENCLR 0x4c
63#define STI_INTFFCLR 0x50
64
65static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
66{
67 return ioread32(p->base + offs);
68}
69
70static inline void em_sti_write(struct em_sti_priv *p, int offs,
71 unsigned long value)
72{
73 iowrite32(value, p->base + offs);
74}
75
76static int em_sti_enable(struct em_sti_priv *p)
77{
78 int ret;
79
80 /* enable clock */
81 ret = clk_enable(p->clk);
82 if (ret) {
83 dev_err(&p->pdev->dev, "cannot enable clock\n");
84 return ret;
85 }
86
87 /* configure channel, periodic mode and maximum timeout */
88 p->rate = clk_get_rate(p->clk);
89
90 /* reset the counter */
91 em_sti_write(p, STI_SET_H, 0x40000000);
92 em_sti_write(p, STI_SET_L, 0x00000000);
93
94 /* mask and clear pending interrupts */
95 em_sti_write(p, STI_INTENCLR, 3);
96 em_sti_write(p, STI_INTFFCLR, 3);
97
98 /* enable updates of counter registers */
99 em_sti_write(p, STI_CONTROL, 1);
100
101 return 0;
102}
103
104static void em_sti_disable(struct em_sti_priv *p)
105{
106 /* mask interrupts */
107 em_sti_write(p, STI_INTENCLR, 3);
108
109 /* stop clock */
110 clk_disable(p->clk);
111}
112
113static cycle_t em_sti_count(struct em_sti_priv *p)
114{
115 cycle_t ticks;
116 unsigned long flags;
117
118 /* the STI hardware buffers the 48-bit count, but to
119 * break it out into two 32-bit access the registers
120 * must be accessed in a certain order.
121 * Always read STI_COUNT_H before STI_COUNT_L.
122 */
123 raw_spin_lock_irqsave(&p->lock, flags);
124 ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
125 ticks |= em_sti_read(p, STI_COUNT_L);
126 raw_spin_unlock_irqrestore(&p->lock, flags);
127
128 return ticks;
129}
130
131static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
132{
133 unsigned long flags;
134
135 raw_spin_lock_irqsave(&p->lock, flags);
136
137 /* mask compare A interrupt */
138 em_sti_write(p, STI_INTENCLR, 1);
139
140 /* update compare A value */
141 em_sti_write(p, STI_COMPA_H, next >> 32);
142 em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
143
144 /* clear compare A interrupt source */
145 em_sti_write(p, STI_INTFFCLR, 1);
146
147 /* unmask compare A interrupt */
148 em_sti_write(p, STI_INTENSET, 1);
149
150 raw_spin_unlock_irqrestore(&p->lock, flags);
151
152 return next;
153}
154
155static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
156{
157 struct em_sti_priv *p = dev_id;
158
159 p->ced.event_handler(&p->ced);
160 return IRQ_HANDLED;
161}
162
163static int em_sti_start(struct em_sti_priv *p, unsigned int user)
164{
165 unsigned long flags;
166 int used_before;
167 int ret = 0;
168
169 raw_spin_lock_irqsave(&p->lock, flags);
170 used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
171 if (!used_before)
172 ret = em_sti_enable(p);
173
174 if (!ret)
175 p->active[user] = 1;
176 raw_spin_unlock_irqrestore(&p->lock, flags);
177
178 return ret;
179}
180
181static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
182{
183 unsigned long flags;
184 int used_before, used_after;
185
186 raw_spin_lock_irqsave(&p->lock, flags);
187 used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
188 p->active[user] = 0;
189 used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
190
191 if (used_before && !used_after)
192 em_sti_disable(p);
193 raw_spin_unlock_irqrestore(&p->lock, flags);
194}
195
196static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
197{
198 return container_of(cs, struct em_sti_priv, cs);
199}
200
201static cycle_t em_sti_clocksource_read(struct clocksource *cs)
202{
203 return em_sti_count(cs_to_em_sti(cs));
204}
205
206static int em_sti_clocksource_enable(struct clocksource *cs)
207{
208 int ret;
209 struct em_sti_priv *p = cs_to_em_sti(cs);
210
211 ret = em_sti_start(p, USER_CLOCKSOURCE);
212 if (!ret)
213 __clocksource_updatefreq_hz(cs, p->rate);
214 return ret;
215}
216
217static void em_sti_clocksource_disable(struct clocksource *cs)
218{
219 em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
220}
221
222static void em_sti_clocksource_resume(struct clocksource *cs)
223{
224 em_sti_clocksource_enable(cs);
225}
226
227static int em_sti_register_clocksource(struct em_sti_priv *p)
228{
229 struct clocksource *cs = &p->cs;
230
231 memset(cs, 0, sizeof(*cs));
232 cs->name = dev_name(&p->pdev->dev);
233 cs->rating = 200;
234 cs->read = em_sti_clocksource_read;
235 cs->enable = em_sti_clocksource_enable;
236 cs->disable = em_sti_clocksource_disable;
237 cs->suspend = em_sti_clocksource_disable;
238 cs->resume = em_sti_clocksource_resume;
239 cs->mask = CLOCKSOURCE_MASK(48);
240 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
241
242 dev_info(&p->pdev->dev, "used as clock source\n");
243
244 /* Register with dummy 1 Hz value, gets updated in ->enable() */
245 clocksource_register_hz(cs, 1);
246 return 0;
247}
248
249static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
250{
251 return container_of(ced, struct em_sti_priv, ced);
252}
253
254static void em_sti_clock_event_mode(enum clock_event_mode mode,
255 struct clock_event_device *ced)
256{
257 struct em_sti_priv *p = ced_to_em_sti(ced);
258
259 /* deal with old setting first */
260 switch (ced->mode) {
261 case CLOCK_EVT_MODE_ONESHOT:
262 em_sti_stop(p, USER_CLOCKEVENT);
263 break;
264 default:
265 break;
266 }
267
268 switch (mode) {
269 case CLOCK_EVT_MODE_ONESHOT:
270 dev_info(&p->pdev->dev, "used for oneshot clock events\n");
271 em_sti_start(p, USER_CLOCKEVENT);
272 clockevents_config(&p->ced, p->rate);
273 break;
274 case CLOCK_EVT_MODE_SHUTDOWN:
275 case CLOCK_EVT_MODE_UNUSED:
276 em_sti_stop(p, USER_CLOCKEVENT);
277 break;
278 default:
279 break;
280 }
281}
282
283static int em_sti_clock_event_next(unsigned long delta,
284 struct clock_event_device *ced)
285{
286 struct em_sti_priv *p = ced_to_em_sti(ced);
287 cycle_t next;
288 int safe;
289
290 next = em_sti_set_next(p, em_sti_count(p) + delta);
291 safe = em_sti_count(p) < (next - 1);
292
293 return !safe;
294}
295
296static void em_sti_register_clockevent(struct em_sti_priv *p)
297{
298 struct clock_event_device *ced = &p->ced;
299
300 memset(ced, 0, sizeof(*ced));
301 ced->name = dev_name(&p->pdev->dev);
302 ced->features = CLOCK_EVT_FEAT_ONESHOT;
303 ced->rating = 200;
304 ced->cpumask = cpumask_of(0);
305 ced->set_next_event = em_sti_clock_event_next;
306 ced->set_mode = em_sti_clock_event_mode;
307
308 dev_info(&p->pdev->dev, "used for clock events\n");
309
310 /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
311 clockevents_config_and_register(ced, 1, 2, 0xffffffff);
312}
313
314static int __devinit em_sti_probe(struct platform_device *pdev)
315{
316 struct em_sti_priv *p;
317 struct resource *res;
318 int irq, ret;
319
320 p = kzalloc(sizeof(*p), GFP_KERNEL);
321 if (p == NULL) {
322 dev_err(&pdev->dev, "failed to allocate driver data\n");
323 ret = -ENOMEM;
324 goto err0;
325 }
326
327 p->pdev = pdev;
328 platform_set_drvdata(pdev, p);
329
330 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
331 if (!res) {
332 dev_err(&pdev->dev, "failed to get I/O memory\n");
333 ret = -EINVAL;
334 goto err0;
335 }
336
337 irq = platform_get_irq(pdev, 0);
338 if (irq < 0) {
339 dev_err(&pdev->dev, "failed to get irq\n");
340 ret = -EINVAL;
341 goto err0;
342 }
343
344 /* map memory, let base point to the STI instance */
345 p->base = ioremap_nocache(res->start, resource_size(res));
346 if (p->base == NULL) {
347 dev_err(&pdev->dev, "failed to remap I/O memory\n");
348 ret = -ENXIO;
349 goto err0;
350 }
351
352 /* get hold of clock */
353 p->clk = clk_get(&pdev->dev, "sclk");
354 if (IS_ERR(p->clk)) {
355 dev_err(&pdev->dev, "cannot get clock\n");
356 ret = PTR_ERR(p->clk);
357 goto err1;
358 }
359
360 if (request_irq(irq, em_sti_interrupt,
361 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
362 dev_name(&pdev->dev), p)) {
363 dev_err(&pdev->dev, "failed to request low IRQ\n");
364 ret = -ENOENT;
365 goto err2;
366 }
367
368 raw_spin_lock_init(&p->lock);
369 em_sti_register_clockevent(p);
370 em_sti_register_clocksource(p);
371 return 0;
372
373err2:
374 clk_put(p->clk);
375err1:
376 iounmap(p->base);
377err0:
378 kfree(p);
379 return ret;
380}
381
382static int __devexit em_sti_remove(struct platform_device *pdev)
383{
384 return -EBUSY; /* cannot unregister clockevent and clocksource */
385}
386
387static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
388 { .compatible = "renesas,em-sti", },
389 {},
390};
391MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
392
393static struct platform_driver em_sti_device_driver = {
394 .probe = em_sti_probe,
395 .remove = __devexit_p(em_sti_remove),
396 .driver = {
397 .name = "em_sti",
398 .of_match_table = em_sti_dt_ids,
399 }
400};
401
402module_platform_driver(em_sti_device_driver);
403
404MODULE_AUTHOR("Magnus Damm");
405MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
406MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 32fe9ef5cc5c..98b06baafcc6 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -48,13 +48,13 @@ struct sh_cmt_priv {
48 unsigned long next_match_value; 48 unsigned long next_match_value;
49 unsigned long max_match_value; 49 unsigned long max_match_value;
50 unsigned long rate; 50 unsigned long rate;
51 spinlock_t lock; 51 raw_spinlock_t lock;
52 struct clock_event_device ced; 52 struct clock_event_device ced;
53 struct clocksource cs; 53 struct clocksource cs;
54 unsigned long total_cycles; 54 unsigned long total_cycles;
55}; 55};
56 56
57static DEFINE_SPINLOCK(sh_cmt_lock); 57static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
58 58
59#define CMSTR -1 /* shared register */ 59#define CMSTR -1 /* shared register */
60#define CMCSR 0 /* channel register */ 60#define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
139 unsigned long flags, value; 139 unsigned long flags, value;
140 140
141 /* start stop register shared by multiple timer channels */ 141 /* start stop register shared by multiple timer channels */
142 spin_lock_irqsave(&sh_cmt_lock, flags); 142 raw_spin_lock_irqsave(&sh_cmt_lock, flags);
143 value = sh_cmt_read(p, CMSTR); 143 value = sh_cmt_read(p, CMSTR);
144 144
145 if (start) 145 if (start)
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
148 value &= ~(1 << cfg->timer_bit); 148 value &= ~(1 << cfg->timer_bit);
149 149
150 sh_cmt_write(p, CMSTR, value); 150 sh_cmt_write(p, CMSTR, value);
151 spin_unlock_irqrestore(&sh_cmt_lock, flags); 151 raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
152} 152}
153 153
154static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 154static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
328{ 328{
329 unsigned long flags; 329 unsigned long flags;
330 330
331 spin_lock_irqsave(&p->lock, flags); 331 raw_spin_lock_irqsave(&p->lock, flags);
332 __sh_cmt_set_next(p, delta); 332 __sh_cmt_set_next(p, delta);
333 spin_unlock_irqrestore(&p->lock, flags); 333 raw_spin_unlock_irqrestore(&p->lock, flags);
334} 334}
335 335
336static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) 336static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
385 int ret = 0; 385 int ret = 0;
386 unsigned long flags; 386 unsigned long flags;
387 387
388 spin_lock_irqsave(&p->lock, flags); 388 raw_spin_lock_irqsave(&p->lock, flags);
389 389
390 if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) 390 if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
391 ret = sh_cmt_enable(p, &p->rate); 391 ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
398 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) 398 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
399 __sh_cmt_set_next(p, p->max_match_value); 399 __sh_cmt_set_next(p, p->max_match_value);
400 out: 400 out:
401 spin_unlock_irqrestore(&p->lock, flags); 401 raw_spin_unlock_irqrestore(&p->lock, flags);
402 402
403 return ret; 403 return ret;
404} 404}
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
408 unsigned long flags; 408 unsigned long flags;
409 unsigned long f; 409 unsigned long f;
410 410
411 spin_lock_irqsave(&p->lock, flags); 411 raw_spin_lock_irqsave(&p->lock, flags);
412 412
413 f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); 413 f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
414 p->flags &= ~flag; 414 p->flags &= ~flag;
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
420 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) 420 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
421 __sh_cmt_set_next(p, p->max_match_value); 421 __sh_cmt_set_next(p, p->max_match_value);
422 422
423 spin_unlock_irqrestore(&p->lock, flags); 423 raw_spin_unlock_irqrestore(&p->lock, flags);
424} 424}
425 425
426static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) 426static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
435 unsigned long value; 435 unsigned long value;
436 int has_wrapped; 436 int has_wrapped;
437 437
438 spin_lock_irqsave(&p->lock, flags); 438 raw_spin_lock_irqsave(&p->lock, flags);
439 value = p->total_cycles; 439 value = p->total_cycles;
440 raw = sh_cmt_get_counter(p, &has_wrapped); 440 raw = sh_cmt_get_counter(p, &has_wrapped);
441 441
442 if (unlikely(has_wrapped)) 442 if (unlikely(has_wrapped))
443 raw += p->match_value + 1; 443 raw += p->match_value + 1;
444 spin_unlock_irqrestore(&p->lock, flags); 444 raw_spin_unlock_irqrestore(&p->lock, flags);
445 445
446 return value + raw; 446 return value + raw;
447} 447}
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
591 p->max_match_value = (1 << p->width) - 1; 591 p->max_match_value = (1 << p->width) - 1;
592 592
593 p->match_value = p->max_match_value; 593 p->match_value = p->max_match_value;
594 spin_lock_init(&p->lock); 594 raw_spin_lock_init(&p->lock);
595 595
596 if (clockevent_rating) 596 if (clockevent_rating)
597 sh_cmt_register_clockevent(p, name, clockevent_rating); 597 sh_cmt_register_clockevent(p, name, clockevent_rating);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index a2172f690418..d9b76ca64a61 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -43,7 +43,7 @@ struct sh_mtu2_priv {
43 struct clock_event_device ced; 43 struct clock_event_device ced;
44}; 44};
45 45
46static DEFINE_SPINLOCK(sh_mtu2_lock); 46static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
47 47
48#define TSTR -1 /* shared register */ 48#define TSTR -1 /* shared register */
49#define TCR 0 /* channel register */ 49#define TCR 0 /* channel register */
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
107 unsigned long flags, value; 107 unsigned long flags, value;
108 108
109 /* start stop register shared by multiple timer channels */ 109 /* start stop register shared by multiple timer channels */
110 spin_lock_irqsave(&sh_mtu2_lock, flags); 110 raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
111 value = sh_mtu2_read(p, TSTR); 111 value = sh_mtu2_read(p, TSTR);
112 112
113 if (start) 113 if (start)
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
116 value &= ~(1 << cfg->timer_bit); 116 value &= ~(1 << cfg->timer_bit);
117 117
118 sh_mtu2_write(p, TSTR, value); 118 sh_mtu2_write(p, TSTR, value);
119 spin_unlock_irqrestore(&sh_mtu2_lock, flags); 119 raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
120} 120}
121 121
122static int sh_mtu2_enable(struct sh_mtu2_priv *p) 122static int sh_mtu2_enable(struct sh_mtu2_priv *p)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 97f54b634be4..c1b51d49d106 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -45,7 +45,7 @@ struct sh_tmu_priv {
45 struct clocksource cs; 45 struct clocksource cs;
46}; 46};
47 47
48static DEFINE_SPINLOCK(sh_tmu_lock); 48static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
49 49
50#define TSTR -1 /* shared register */ 50#define TSTR -1 /* shared register */
51#define TCOR 0 /* channel register */ 51#define TCOR 0 /* channel register */
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
95 unsigned long flags, value; 95 unsigned long flags, value;
96 96
97 /* start stop register shared by multiple timer channels */ 97 /* start stop register shared by multiple timer channels */
98 spin_lock_irqsave(&sh_tmu_lock, flags); 98 raw_spin_lock_irqsave(&sh_tmu_lock, flags);
99 value = sh_tmu_read(p, TSTR); 99 value = sh_tmu_read(p, TSTR);
100 100
101 if (start) 101 if (start)
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
104 value &= ~(1 << cfg->timer_bit); 104 value &= ~(1 << cfg->timer_bit);
105 105
106 sh_tmu_write(p, TSTR, value); 106 sh_tmu_write(p, TSTR, value);
107 spin_unlock_irqrestore(&sh_tmu_lock, flags); 107 raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
108} 108}
109 109
110static int sh_tmu_enable(struct sh_tmu_priv *p) 110static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
245 245
246 sh_tmu_enable(p); 246 sh_tmu_enable(p);
247 247
248 /* TODO: calculate good shift from rate and counter bit width */ 248 clockevents_config(ced, p->rate);
249
250 ced->shift = 32;
251 ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
252 ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
253 ced->min_delta_ns = 5000;
254 249
255 if (periodic) { 250 if (periodic) {
256 p->periodic = (p->rate + HZ/2) / HZ; 251 p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
323 ced->set_mode = sh_tmu_clock_event_mode; 318 ced->set_mode = sh_tmu_clock_event_mode;
324 319
325 dev_info(&p->pdev->dev, "used for clock events\n"); 320 dev_info(&p->pdev->dev, "used for clock events\n");
326 clockevents_register_device(ced); 321
322 clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
327 323
328 ret = setup_irq(p->irqaction.irq, &p->irqaction); 324 ret = setup_irq(p->irqaction.irq, &p->irqaction);
329 if (ret) { 325 if (ret) {
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index e23dc82d43ac..721296157577 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1626,4 +1626,4 @@ module_exit(dw_exit);
1626MODULE_LICENSE("GPL v2"); 1626MODULE_LICENSE("GPL v2");
1627MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); 1627MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1628MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1628MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1629MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 1629MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fb4f4990f5eb..1dc2a4ad0026 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
815 815
816 init_completion(&sdmac->done); 816 init_completion(&sdmac->done);
817 817
818 sdmac->buf_tail = 0;
819
820 return 0; 818 return 0;
821out: 819out:
822 820
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
927 925
928 sdmac->flags = 0; 926 sdmac->flags = 0;
929 927
928 sdmac->buf_tail = 0;
929
930 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 930 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
931 sg_len, channel); 931 sg_len, channel);
932 932
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1027 1027
1028 sdmac->status = DMA_IN_PROGRESS; 1028 sdmac->status = DMA_IN_PROGRESS;
1029 1029
1030 sdmac->buf_tail = 0;
1031
1030 sdmac->flags |= IMX_DMA_SG_LOOP; 1032 sdmac->flags |= IMX_DMA_SG_LOOP;
1031 sdmac->direction = direction; 1033 sdmac->direction = direction;
1032 ret = sdma_load_context(sdmac); 1034 ret = sdma_load_context(sdmac);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cbcc28e79be6..e4feba6b03c0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -392,6 +392,8 @@ struct pl330_req {
392 struct pl330_reqcfg *cfg; 392 struct pl330_reqcfg *cfg;
393 /* Pointer to first xfer in the request. */ 393 /* Pointer to first xfer in the request. */
394 struct pl330_xfer *x; 394 struct pl330_xfer *x;
395 /* Hook to attach to DMAC's list of reqs with due callback */
396 struct list_head rqd;
395}; 397};
396 398
397/* 399/*
@@ -461,8 +463,6 @@ struct _pl330_req {
461 /* Number of bytes taken to setup MC for the req */ 463 /* Number of bytes taken to setup MC for the req */
462 u32 mc_len; 464 u32 mc_len;
463 struct pl330_req *r; 465 struct pl330_req *r;
464 /* Hook to attach to DMAC's list of reqs with due callback */
465 struct list_head rqd;
466}; 466};
467 467
468/* ToBeDone for tasklet */ 468/* ToBeDone for tasklet */
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
1683/* Returns 1 if state was updated, 0 otherwise */ 1683/* Returns 1 if state was updated, 0 otherwise */
1684static int pl330_update(const struct pl330_info *pi) 1684static int pl330_update(const struct pl330_info *pi)
1685{ 1685{
1686 struct _pl330_req *rqdone; 1686 struct pl330_req *rqdone, *tmp;
1687 struct pl330_dmac *pl330; 1687 struct pl330_dmac *pl330;
1688 unsigned long flags; 1688 unsigned long flags;
1689 void __iomem *regs; 1689 void __iomem *regs;
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
1750 if (active == -1) /* Aborted */ 1750 if (active == -1) /* Aborted */
1751 continue; 1751 continue;
1752 1752
1753 rqdone = &thrd->req[active]; 1753 /* Detach the req */
1754 rqdone = thrd->req[active].r;
1755 thrd->req[active].r = NULL;
1756
1754 mark_free(thrd, active); 1757 mark_free(thrd, active);
1755 1758
1756 /* Get going again ASAP */ 1759 /* Get going again ASAP */
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
1762 } 1765 }
1763 1766
1764 /* Now that we are in no hurry, do the callbacks */ 1767 /* Now that we are in no hurry, do the callbacks */
1765 while (!list_empty(&pl330->req_done)) { 1768 list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
1766 struct pl330_req *r; 1769 list_del(&rqdone->rqd);
1767
1768 rqdone = container_of(pl330->req_done.next,
1769 struct _pl330_req, rqd);
1770
1771 list_del_init(&rqdone->rqd);
1772
1773 /* Detach the req */
1774 r = rqdone->r;
1775 rqdone->r = NULL;
1776 1770
1777 spin_unlock_irqrestore(&pl330->lock, flags); 1771 spin_unlock_irqrestore(&pl330->lock, flags);
1778 _callback(r, PL330_ERR_NONE); 1772 _callback(rqdone, PL330_ERR_NONE);
1779 spin_lock_irqsave(&pl330->lock, flags); 1773 spin_lock_irqsave(&pl330->lock, flags);
1780 } 1774 }
1781 1775
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
2321 /* Pick up ripe tomatoes */ 2315 /* Pick up ripe tomatoes */
2322 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 2316 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
2323 if (desc->status == DONE) { 2317 if (desc->status == DONE) {
2324 if (pch->cyclic) 2318 if (!pch->cyclic)
2325 dma_cookie_complete(&desc->txd); 2319 dma_cookie_complete(&desc->txd);
2326 list_move_tail(&desc->node, &list); 2320 list_move_tail(&desc->node, &list);
2327 } 2321 }
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
2539} 2533}
2540 2534
2541/* Returns the number of descriptors added to the DMAC pool */ 2535/* Returns the number of descriptors added to the DMAC pool */
2542int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) 2536static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
2543{ 2537{
2544 struct dma_pl330_desc *desc; 2538 struct dma_pl330_desc *desc;
2545 unsigned long flags; 2539 unsigned long flags;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 10f375032e96..de5ba86e8b89 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -164,7 +164,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
164 else 164 else
165 return (char *)ptr; 165 return (char *)ptr;
166 166
167 r = size % align; 167 r = (unsigned long)p % align;
168 168
169 if (r == 0) 169 if (r == 0)
170 return (char *)ptr; 170 return (char *)ptr;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index d27778f65a5d..a499c7ed820a 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1814,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1814 if (mce->bank != 8) 1814 if (mce->bank != 8)
1815 return NOTIFY_DONE; 1815 return NOTIFY_DONE;
1816 1816
1817#ifdef CONFIG_SMP
1818 /* Only handle if it is the right mc controller */
1819 if (mce->socketid != pvt->i7core_dev->socket)
1820 return NOTIFY_DONE;
1821#endif
1822
1823 smp_rmb(); 1817 smp_rmb();
1824 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { 1818 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1825 smp_wmb(); 1819 smp_wmb();
@@ -2116,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2116 if (pvt->enable_scrub) 2110 if (pvt->enable_scrub)
2117 disable_sdram_scrub_setting(mci); 2111 disable_sdram_scrub_setting(mci);
2118 2112
2119 mce_unregister_decode_chain(&i7_mce_dec);
2120
2121 /* Disable EDAC polling */ 2113 /* Disable EDAC polling */
2122 i7core_pci_ctl_release(pvt); 2114 i7core_pci_ctl_release(pvt);
2123 2115
@@ -2222,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2222 /* DCLK for scrub rate setting */ 2214 /* DCLK for scrub rate setting */
2223 pvt->dclk_freq = get_dclk_freq(); 2215 pvt->dclk_freq = get_dclk_freq();
2224 2216
2225 mce_register_decode_chain(&i7_mce_dec);
2226
2227 return 0; 2217 return 0;
2228 2218
2229fail0: 2219fail0:
@@ -2367,8 +2357,10 @@ static int __init i7core_init(void)
2367 2357
2368 pci_rc = pci_register_driver(&i7core_driver); 2358 pci_rc = pci_register_driver(&i7core_driver);
2369 2359
2370 if (pci_rc >= 0) 2360 if (pci_rc >= 0) {
2361 mce_register_decode_chain(&i7_mce_dec);
2371 return 0; 2362 return 0;
2363 }
2372 2364
2373 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", 2365 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2374 pci_rc); 2366 pci_rc);
@@ -2384,6 +2376,7 @@ static void __exit i7core_exit(void)
2384{ 2376{
2385 debugf2("MC: " __FILE__ ": %s()\n", __func__); 2377 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2386 pci_unregister_driver(&i7core_driver); 2378 pci_unregister_driver(&i7core_driver);
2379 mce_unregister_decode_chain(&i7_mce_dec);
2387} 2380}
2388 2381
2389module_init(i7core_init); 2382module_init(i7core_init);
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index c6074c5cd1ef..8c87a5e87057 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -5,8 +5,6 @@
5 5
6#include <asm/mce.h> 6#include <asm/mce.h>
7 7
8#define BIT_64(n) (U64_C(1) << (n))
9
10#define EC(x) ((x) & 0xffff) 8#define EC(x) ((x) & 0xffff)
11#define XEC(x, mask) (((x) >> 16) & mask) 9#define XEC(x, mask) (((x) >> 16) & mask)
12 10
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4c402353ba98..0e374625f6f8 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -980,7 +980,8 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
980 layers[1].type = EDAC_MC_LAYER_CHANNEL; 980 layers[1].type = EDAC_MC_LAYER_CHANNEL;
981 layers[1].size = 1; 981 layers[1].size = 1;
982 layers[1].is_virt_csrow = false; 982 layers[1].is_virt_csrow = false;
983 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata)); 983 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
984 sizeof(*pdata));
984 if (!mci) { 985 if (!mci) {
985 devres_release_group(&op->dev, mpc85xx_mc_err_probe); 986 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
986 return -ENOMEM; 987 return -ENOMEM;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4adaf4b7da99..36ad17e79d61 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -555,7 +555,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
555 pvt->is_close_pg = false; 555 pvt->is_close_pg = false;
556 } 556 }
557 557
558 pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg); 558 pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
559 if (IS_RDIMM_ENABLED(reg)) { 559 if (IS_RDIMM_ENABLED(reg)) {
560 /* FIXME: Can also be LRDIMM */ 560 /* FIXME: Can also be LRDIMM */
561 debugf0("Memory is registered\n"); 561 debugf0("Memory is registered\n");
@@ -1604,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1604 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", 1604 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
1605 __func__, mci, &sbridge_dev->pdev[0]->dev); 1605 __func__, mci, &sbridge_dev->pdev[0]->dev);
1606 1606
1607 mce_unregister_decode_chain(&sbridge_mce_dec);
1608
1609 /* Remove MC sysfs nodes */ 1607 /* Remove MC sysfs nodes */
1610 edac_mc_del_mc(mci->dev); 1608 edac_mc_del_mc(mci->dev);
1611 1609
@@ -1682,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
1682 goto fail0; 1680 goto fail0;
1683 } 1681 }
1684 1682
1685 mce_register_decode_chain(&sbridge_mce_dec);
1686 return 0; 1683 return 0;
1687 1684
1688fail0: 1685fail0:
@@ -1811,8 +1808,10 @@ static int __init sbridge_init(void)
1811 1808
1812 pci_rc = pci_register_driver(&sbridge_driver); 1809 pci_rc = pci_register_driver(&sbridge_driver);
1813 1810
1814 if (pci_rc >= 0) 1811 if (pci_rc >= 0) {
1812 mce_register_decode_chain(&sbridge_mce_dec);
1815 return 0; 1813 return 0;
1814 }
1816 1815
1817 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", 1816 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
1818 pci_rc); 1817 pci_rc);
@@ -1828,6 +1827,7 @@ static void __exit sbridge_exit(void)
1828{ 1827{
1829 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1828 debugf2("MC: " __FILE__ ": %s()\n", __func__);
1830 pci_unregister_driver(&sbridge_driver); 1829 pci_unregister_driver(&sbridge_driver);
1830 mce_unregister_decode_chain(&sbridge_mce_dec);
1831} 1831}
1832 1832
1833module_init(sbridge_init); 1833module_init(sbridge_init);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 23416e443765..a4ed30bd9a41 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = {
116 [5] = "Charge-downstream", 116 [5] = "Charge-downstream",
117 [6] = "MHL", 117 [6] = "MHL",
118 [7] = "Dock-desk", 118 [7] = "Dock-desk",
119 [7] = "Dock-card", 119 [8] = "Dock-card",
120 [8] = "JIG", 120 [9] = "JIG",
121 121
122 NULL, 122 NULL,
123}; 123};
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
514 514
515 extcon_dev_unregister(info->edev); 515 extcon_dev_unregister(info->edev);
516 516
517 kfree(info->edev);
517 kfree(info); 518 kfree(info);
518 519
519 return 0; 520 return 0;
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon_class.c
index f598a700ec15..159aeb07b3ba 100644
--- a/drivers/extcon/extcon_class.c
+++ b/drivers/extcon/extcon_class.c
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
762#if defined(CONFIG_ANDROID) 762#if defined(CONFIG_ANDROID)
763 if (switch_class) 763 if (switch_class)
764 ret = class_compat_create_link(switch_class, edev->dev, 764 ret = class_compat_create_link(switch_class, edev->dev,
765 dev); 765 NULL);
766#endif /* CONFIG_ANDROID */ 766#endif /* CONFIG_ANDROID */
767 767
768 spin_lock_init(&edev->lock); 768 spin_lock_init(&edev->lock);
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe7a07b47336..8a0dcc11c7c7 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
125 if (ret < 0) 125 if (ret < 0)
126 goto err_request_irq; 126 goto err_request_irq;
127 127
128 platform_set_drvdata(pdev, extcon_data);
128 /* Perform initial detection */ 129 /* Perform initial detection */
129 gpio_extcon_work(&extcon_data->work.work); 130 gpio_extcon_work(&extcon_data->work.work);
130 131
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
146 struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev); 147 struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
147 148
148 cancel_delayed_work_sync(&extcon_data->work); 149 cancel_delayed_work_sync(&extcon_data->work);
150 free_irq(extcon_data->irq, extcon_data);
149 gpio_free(extcon_data->gpio); 151 gpio_free(extcon_data->gpio);
150 extcon_dev_unregister(&extcon_data->edev); 152 extcon_dev_unregister(&extcon_data->edev);
151 devm_kfree(&pdev->dev, extcon_data); 153 devm_kfree(&pdev->dev, extcon_data);
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 7bb00448e13d..b6453d0e44ad 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void)
2833 } 2833 }
2834 2834
2835 /* need to set base address for gpc4 */ 2835 /* need to set base address for gpc4 */
2836 exonys5_gpios_1[11].base = gpio_base1 + 0x2E0; 2836 exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
2837 2837
2838 /* need to set base address for gpx */ 2838 /* need to set base address for gpx */
2839 chip = &exynos5_gpios_1[21]; 2839 chip = &exynos5_gpios_1[21];
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d7038230b71e..7053140c6596 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
35 {0,} 35 {0,}
36}; 36};
37 37
38
39static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
40{
41 struct apertures_struct *ap;
42 bool primary = false;
43
44 ap = alloc_apertures(1);
45 ap->ranges[0].base = pci_resource_start(pdev, 0);
46 ap->ranges[0].size = pci_resource_len(pdev, 0);
47
48#ifdef CONFIG_X86
49 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
50#endif
51 remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
52 kfree(ap);
53}
54
38static int __devinit 55static int __devinit
39cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 56cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40{ 57{
58 cirrus_kick_out_firmware_fb(pdev);
59
41 return drm_get_pci_dev(pdev, ent, &driver); 60 return drm_get_pci_dev(pdev, ent, &driver);
42} 61}
43 62
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 21bdfa8836f7..64ea597cb6d3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -145,7 +145,7 @@ struct cirrus_device {
145 struct ttm_bo_device bdev; 145 struct ttm_bo_device bdev;
146 atomic_t validate_sequence; 146 atomic_t validate_sequence;
147 } ttm; 147 } ttm;
148 148 bool mm_inited;
149}; 149};
150 150
151 151
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ebcd11a5023..50e170f879de 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
275 pci_resource_len(dev->pdev, 0), 275 pci_resource_len(dev->pdev, 0),
276 DRM_MTRR_WC); 276 DRM_MTRR_WC);
277 277
278 cirrus->mm_inited = true;
278 return 0; 279 return 0;
279} 280}
280 281
281void cirrus_mm_fini(struct cirrus_device *cirrus) 282void cirrus_mm_fini(struct cirrus_device *cirrus)
282{ 283{
283 struct drm_device *dev = cirrus->dev; 284 struct drm_device *dev = cirrus->dev;
285
286 if (!cirrus->mm_inited)
287 return;
288
284 ttm_bo_device_release(&cirrus->ttm.bdev); 289 ttm_bo_device_release(&cirrus->ttm.bdev);
285 290
286 cirrus_ttm_global_release(cirrus); 291 cirrus_ttm_global_release(cirrus);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c3b5139eba7f..5873e481e5d2 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,7 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/export.h> 33#include <linux/module.h>
34#include "drmP.h" 34#include "drmP.h"
35#include "drm_edid.h" 35#include "drm_edid.h"
36#include "drm_edid_modes.h" 36#include "drm_edid_modes.h"
@@ -149,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
149} 149}
150EXPORT_SYMBOL(drm_edid_header_is_valid); 150EXPORT_SYMBOL(drm_edid_header_is_valid);
151 151
152static int edid_fixup __read_mostly = 6;
153module_param_named(edid_fixup, edid_fixup, int, 0400);
154MODULE_PARM_DESC(edid_fixup,
155 "Minimum number of valid EDID header bytes (0-8, default 6)");
152 156
153/* 157/*
154 * Sanity check the EDID block (base or extension). Return 0 if the block 158 * Sanity check the EDID block (base or extension). Return 0 if the block
@@ -160,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
160 u8 csum = 0; 164 u8 csum = 0;
161 struct edid *edid = (struct edid *)raw_edid; 165 struct edid *edid = (struct edid *)raw_edid;
162 166
167 if (edid_fixup > 8 || edid_fixup < 0)
168 edid_fixup = 6;
169
163 if (block == 0) { 170 if (block == 0) {
164 int score = drm_edid_header_is_valid(raw_edid); 171 int score = drm_edid_header_is_valid(raw_edid);
165 if (score == 8) ; 172 if (score == 8) ;
166 else if (score >= 6) { 173 else if (score >= edid_fixup) {
167 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); 174 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
168 memcpy(raw_edid, edid_header, sizeof(edid_header)); 175 memcpy(raw_edid, edid_header, sizeof(edid_header));
169 } else { 176 } else {
@@ -603,7 +610,7 @@ static bool
603drm_monitor_supports_rb(struct edid *edid) 610drm_monitor_supports_rb(struct edid *edid)
604{ 611{
605 if (edid->revision >= 4) { 612 if (edid->revision >= 4) {
606 bool ret; 613 bool ret = false;
607 drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); 614 drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
608 return ret; 615 return ret;
609 } 616 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 420953197d0a..d6de2e07fa03 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = {
244}; 244};
245 245
246static struct drm_driver exynos_drm_driver = { 246static struct drm_driver exynos_drm_driver = {
247 .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | 247 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
248 DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 248 DRIVER_GEM | DRIVER_PRIME,
249 .load = exynos_drm_load, 249 .load = exynos_drm_load,
250 .unload = exynos_drm_unload, 250 .unload = exynos_drm_unload,
251 .open = exynos_drm_open, 251 .open = exynos_drm_open,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 6e9ac7bd1dcf..23d5ad379f86 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
172 manager_ops->commit(manager->dev); 172 manager_ops->commit(manager->dev);
173} 173}
174 174
175static struct drm_crtc *
176exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
177{
178 return encoder->crtc;
179}
180
181static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { 175static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
182 .dpms = exynos_drm_encoder_dpms, 176 .dpms = exynos_drm_encoder_dpms,
183 .mode_fixup = exynos_drm_encoder_mode_fixup, 177 .mode_fixup = exynos_drm_encoder_mode_fixup,
184 .mode_set = exynos_drm_encoder_mode_set, 178 .mode_set = exynos_drm_encoder_mode_set,
185 .prepare = exynos_drm_encoder_prepare, 179 .prepare = exynos_drm_encoder_prepare,
186 .commit = exynos_drm_encoder_commit, 180 .commit = exynos_drm_encoder_commit,
187 .get_crtc = exynos_drm_encoder_get_crtc,
188}; 181};
189 182
190static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) 183static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index f82a299553fb..4ccfe4328fab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -51,11 +51,22 @@ struct exynos_drm_fb {
51static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 51static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
52{ 52{
53 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 53 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
54 unsigned int i;
54 55
55 DRM_DEBUG_KMS("%s\n", __FILE__); 56 DRM_DEBUG_KMS("%s\n", __FILE__);
56 57
57 drm_framebuffer_cleanup(fb); 58 drm_framebuffer_cleanup(fb);
58 59
60 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
61 struct drm_gem_object *obj;
62
63 if (exynos_fb->exynos_gem_obj[i] == NULL)
64 continue;
65
66 obj = &exynos_fb->exynos_gem_obj[i]->base;
67 drm_gem_object_unreference_unlocked(obj);
68 }
69
59 kfree(exynos_fb); 70 kfree(exynos_fb);
60 exynos_fb = NULL; 71 exynos_fb = NULL;
61} 72}
@@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
134 return ERR_PTR(-ENOENT); 145 return ERR_PTR(-ENOENT);
135 } 146 }
136 147
137 drm_gem_object_unreference_unlocked(obj);
138
139 fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); 148 fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
140 if (IS_ERR(fb)) 149 if (IS_ERR(fb)) {
150 drm_gem_object_unreference_unlocked(obj);
141 return fb; 151 return fb;
152 }
142 153
143 exynos_fb = to_exynos_fb(fb); 154 exynos_fb = to_exynos_fb(fb);
144 nr = exynos_drm_format_num_buffers(fb->pixel_format); 155 nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
152 return ERR_PTR(-ENOENT); 163 return ERR_PTR(-ENOENT);
153 } 164 }
154 165
155 drm_gem_object_unreference_unlocked(obj);
156
157 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); 166 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
158 } 167 }
159 168
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 3ecb30d93552..50823756cdea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -31,10 +31,10 @@
31static inline int exynos_drm_format_num_buffers(uint32_t format) 31static inline int exynos_drm_format_num_buffers(uint32_t format)
32{ 32{
33 switch (format) { 33 switch (format) {
34 case DRM_FORMAT_NV12M: 34 case DRM_FORMAT_NV12:
35 case DRM_FORMAT_NV12MT: 35 case DRM_FORMAT_NV12MT:
36 return 2; 36 return 2;
37 case DRM_FORMAT_YUV420M: 37 case DRM_FORMAT_YUV420:
38 return 3; 38 return 3;
39 default: 39 default:
40 return 1; 40 return 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fc91293c4560..5c8b683029ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
689 struct drm_device *dev, uint32_t handle, 689 struct drm_device *dev, uint32_t handle,
690 uint64_t *offset) 690 uint64_t *offset)
691{ 691{
692 struct exynos_drm_gem_obj *exynos_gem_obj;
693 struct drm_gem_object *obj; 692 struct drm_gem_object *obj;
694 int ret = 0; 693 int ret = 0;
695 694
@@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
710 goto unlock; 709 goto unlock;
711 } 710 }
712 711
713 exynos_gem_obj = to_exynos_gem_obj(obj); 712 if (!obj->map_list.map) {
714 713 ret = drm_gem_create_mmap_offset(obj);
715 if (!exynos_gem_obj->base.map_list.map) {
716 ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
717 if (ret) 714 if (ret)
718 goto out; 715 goto out;
719 } 716 }
720 717
721 *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT; 718 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
722 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 719 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
723 720
724out: 721out:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 68ef01028375..e2147a2ddcec 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
365 switch (win_data->pixel_format) { 365 switch (win_data->pixel_format) {
366 case DRM_FORMAT_NV12MT: 366 case DRM_FORMAT_NV12MT:
367 tiled_mode = true; 367 tiled_mode = true;
368 case DRM_FORMAT_NV12M: 368 case DRM_FORMAT_NV12:
369 crcb_mode = false; 369 crcb_mode = false;
370 buf_num = 2; 370 buf_num = 2;
371 break; 371 break;
@@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx)
601 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); 601 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
602 602
603 /* setting graphical layers */ 603 /* setting graphical layers */
604
605 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ 604 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
606 val |= MXR_GRP_CFG_WIN_BLEND_EN; 605 val |= MXR_GRP_CFG_WIN_BLEND_EN;
606 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
607 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
607 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ 608 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
608 609
609 /* the same configuration for both layers */ 610 /* the same configuration for both layers */
610 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); 611 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
611
612 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
613 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
614 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); 612 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
615 613
614 /* setting video layers */
615 val = MXR_GRP_CFG_ALPHA_VAL(0);
616 mixer_reg_write(res, MXR_VIDEO_CFG, val);
617
616 /* configuration of Video Processor Registers */ 618 /* configuration of Video Processor Registers */
617 vp_win_reset(ctx); 619 vp_win_reset(ctx);
618 vp_default_filter(res); 620 vp_default_filter(res);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index f920fb5e42b6..fa9439159ebd 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
130 return -EINVAL; 130 return -EINVAL;
131 131
132 /* This is all entirely broken */ 132 /* This is all entirely broken */
133 down_write(&current->mm->mmap_sem);
134 old_fops = file_priv->filp->f_op; 133 old_fops = file_priv->filp->f_op;
135 file_priv->filp->f_op = &i810_buffer_fops; 134 file_priv->filp->f_op = &i810_buffer_fops;
136 dev_priv->mmap_buffer = buf; 135 dev_priv->mmap_buffer = buf;
137 buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total, 136 buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
138 PROT_READ | PROT_WRITE, 137 PROT_READ | PROT_WRITE,
139 MAP_SHARED, buf->bus_address); 138 MAP_SHARED, buf->bus_address);
140 dev_priv->mmap_buffer = NULL; 139 dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
145 retcode = PTR_ERR(buf_priv->virtual); 144 retcode = PTR_ERR(buf_priv->virtual);
146 buf_priv->virtual = NULL; 145 buf_priv->virtual = NULL;
147 } 146 }
148 up_write(&current->mm->mmap_sem);
149 147
150 return retcode; 148 return retcode;
151} 149}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 05adbf23951a..a378c0800304 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
233 .has_blt_ring = 1, 233 .has_blt_ring = 1,
234 .has_llc = 1, 234 .has_llc = 1,
235 .has_pch_split = 1, 235 .has_pch_split = 1,
236 .has_force_wake = 1,
236}; 237};
237 238
238static const struct intel_device_info intel_sandybridge_m_info = { 239static const struct intel_device_info intel_sandybridge_m_info = {
@@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
243 .has_blt_ring = 1, 244 .has_blt_ring = 1,
244 .has_llc = 1, 245 .has_llc = 1,
245 .has_pch_split = 1, 246 .has_pch_split = 1,
247 .has_force_wake = 1,
246}; 248};
247 249
248static const struct intel_device_info intel_ivybridge_d_info = { 250static const struct intel_device_info intel_ivybridge_d_info = {
@@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
252 .has_blt_ring = 1, 254 .has_blt_ring = 1,
253 .has_llc = 1, 255 .has_llc = 1,
254 .has_pch_split = 1, 256 .has_pch_split = 1,
257 .has_force_wake = 1,
255}; 258};
256 259
257static const struct intel_device_info intel_ivybridge_m_info = { 260static const struct intel_device_info intel_ivybridge_m_info = {
@@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
262 .has_blt_ring = 1, 265 .has_blt_ring = 1,
263 .has_llc = 1, 266 .has_llc = 1,
264 .has_pch_split = 1, 267 .has_pch_split = 1,
268 .has_force_wake = 1,
265}; 269};
266 270
267static const struct intel_device_info intel_valleyview_m_info = { 271static const struct intel_device_info intel_valleyview_m_info = {
@@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = {
289 .has_blt_ring = 1, 293 .has_blt_ring = 1,
290 .has_llc = 1, 294 .has_llc = 1,
291 .has_pch_split = 1, 295 .has_pch_split = 1,
296 .has_force_wake = 1,
292}; 297};
293 298
294static const struct intel_device_info intel_haswell_m_info = { 299static const struct intel_device_info intel_haswell_m_info = {
@@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = {
298 .has_blt_ring = 1, 303 .has_blt_ring = 1,
299 .has_llc = 1, 304 .has_llc = 1,
300 .has_pch_split = 1, 305 .has_pch_split = 1,
306 .has_force_wake = 1,
301}; 307};
302 308
303static const struct pci_device_id pciidlist[] = { /* aka */ 309static const struct pci_device_id pciidlist[] = { /* aka */
@@ -1144,10 +1150,9 @@ MODULE_LICENSE("GPL and additional rights");
1144 1150
1145/* We give fast paths for the really cool registers */ 1151/* We give fast paths for the really cool registers */
1146#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1152#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1147 (((dev_priv)->info->gen >= 6) && \ 1153 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
1148 ((reg) < 0x40000) && \ 1154 ((reg) < 0x40000) && \
1149 ((reg) != FORCEWAKE)) && \ 1155 ((reg) != FORCEWAKE))
1150 (!IS_VALLEYVIEW((dev_priv)->dev))
1151 1156
1152static bool IS_DISPLAYREG(u32 reg) 1157static bool IS_DISPLAYREG(u32 reg)
1153{ 1158{
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 24ef5d77927f..a0c15abbdcef 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -286,6 +286,7 @@ struct intel_device_info {
286 u8 is_ivybridge:1; 286 u8 is_ivybridge:1;
287 u8 is_valleyview:1; 287 u8 is_valleyview:1;
288 u8 has_pch_split:1; 288 u8 has_pch_split:1;
289 u8 has_force_wake:1;
289 u8 is_haswell:1; 290 u8 is_haswell:1;
290 u8 has_fbc:1; 291 u8 has_fbc:1;
291 u8 has_pipe_cxsr:1; 292 u8 has_pipe_cxsr:1;
@@ -1122,6 +1123,8 @@ struct drm_i915_file_private {
1122#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1123#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1123#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1124#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1124 1125
1126#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1127
1125#include "i915_trace.h" 1128#include "i915_trace.h"
1126 1129
1127/** 1130/**
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 84975e1e1f05..23f2ea0f0651 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -585,7 +585,7 @@ out:
585 return ret; 585 return ret;
586} 586}
587 587
588static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) 588static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
589{ 589{
590 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 590 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
591 int pipe; 591 int pipe;
@@ -625,6 +625,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
625 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 625 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
626} 626}
627 627
628static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
629{
630 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
631 int pipe;
632
633 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
634 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
635 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
636 SDE_AUDIO_POWER_SHIFT_CPT);
637
638 if (pch_iir & SDE_AUX_MASK_CPT)
639 DRM_DEBUG_DRIVER("AUX channel interrupt\n");
640
641 if (pch_iir & SDE_GMBUS_CPT)
642 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
643
644 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
645 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
646
647 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
648 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
649
650 if (pch_iir & SDE_FDI_MASK_CPT)
651 for_each_pipe(pipe)
652 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
653 pipe_name(pipe),
654 I915_READ(FDI_RX_IIR(pipe)));
655}
656
628static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) 657static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
629{ 658{
630 struct drm_device *dev = (struct drm_device *) arg; 659 struct drm_device *dev = (struct drm_device *) arg;
@@ -666,7 +695,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
666 695
667 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 696 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
668 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 697 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
669 pch_irq_handler(dev, pch_iir); 698 cpt_irq_handler(dev, pch_iir);
670 699
671 /* clear PCH hotplug event before clear CPU irq */ 700 /* clear PCH hotplug event before clear CPU irq */
672 I915_WRITE(SDEIIR, pch_iir); 701 I915_WRITE(SDEIIR, pch_iir);
@@ -759,7 +788,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
759 if (de_iir & DE_PCH_EVENT) { 788 if (de_iir & DE_PCH_EVENT) {
760 if (pch_iir & hotplug_mask) 789 if (pch_iir & hotplug_mask)
761 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 790 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
762 pch_irq_handler(dev, pch_iir); 791 if (HAS_PCH_CPT(dev))
792 cpt_irq_handler(dev, pch_iir);
793 else
794 ibx_irq_handler(dev, pch_iir);
763 } 795 }
764 796
765 if (de_iir & DE_PCU_EVENT) { 797 if (de_iir & DE_PCU_EVENT) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a61481cd2c2..9dfc4c5ff31e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -210,9 +210,17 @@
210#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) 210#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
211#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) 211#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
212#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) 212#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
213/* IVB has funny definitions for which plane to flip. */
214#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
215#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
216#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
217#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
218#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
219#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
213#define MI_ARB_ON_OFF MI_INSTR(0x08, 0) 220#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
214#define MI_ARB_ENABLE (1<<0) 221#define MI_ARB_ENABLE (1<<0)
215#define MI_ARB_DISABLE (0<<0) 222#define MI_ARB_DISABLE (0<<0)
223
216#define MI_SET_CONTEXT MI_INSTR(0x18, 0) 224#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
217#define MI_MM_SPACE_GTT (1<<8) 225#define MI_MM_SPACE_GTT (1<<8)
218#define MI_MM_SPACE_PHYSICAL (0<<8) 226#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -3391,7 +3399,7 @@
3391 3399
3392/* PCH */ 3400/* PCH */
3393 3401
3394/* south display engine interrupt */ 3402/* south display engine interrupt: IBX */
3395#define SDE_AUDIO_POWER_D (1 << 27) 3403#define SDE_AUDIO_POWER_D (1 << 27)
3396#define SDE_AUDIO_POWER_C (1 << 26) 3404#define SDE_AUDIO_POWER_C (1 << 26)
3397#define SDE_AUDIO_POWER_B (1 << 25) 3405#define SDE_AUDIO_POWER_B (1 << 25)
@@ -3427,15 +3435,44 @@
3427#define SDE_TRANSA_CRC_ERR (1 << 1) 3435#define SDE_TRANSA_CRC_ERR (1 << 1)
3428#define SDE_TRANSA_FIFO_UNDER (1 << 0) 3436#define SDE_TRANSA_FIFO_UNDER (1 << 0)
3429#define SDE_TRANS_MASK (0x3f) 3437#define SDE_TRANS_MASK (0x3f)
3430/* CPT */ 3438
3431#define SDE_CRT_HOTPLUG_CPT (1 << 19) 3439/* south display engine interrupt: CPT/PPT */
3440#define SDE_AUDIO_POWER_D_CPT (1 << 31)
3441#define SDE_AUDIO_POWER_C_CPT (1 << 30)
3442#define SDE_AUDIO_POWER_B_CPT (1 << 29)
3443#define SDE_AUDIO_POWER_SHIFT_CPT 29
3444#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
3445#define SDE_AUXD_CPT (1 << 27)
3446#define SDE_AUXC_CPT (1 << 26)
3447#define SDE_AUXB_CPT (1 << 25)
3448#define SDE_AUX_MASK_CPT (7 << 25)
3432#define SDE_PORTD_HOTPLUG_CPT (1 << 23) 3449#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
3433#define SDE_PORTC_HOTPLUG_CPT (1 << 22) 3450#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
3434#define SDE_PORTB_HOTPLUG_CPT (1 << 21) 3451#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
3452#define SDE_CRT_HOTPLUG_CPT (1 << 19)
3435#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ 3453#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
3436 SDE_PORTD_HOTPLUG_CPT | \ 3454 SDE_PORTD_HOTPLUG_CPT | \
3437 SDE_PORTC_HOTPLUG_CPT | \ 3455 SDE_PORTC_HOTPLUG_CPT | \
3438 SDE_PORTB_HOTPLUG_CPT) 3456 SDE_PORTB_HOTPLUG_CPT)
3457#define SDE_GMBUS_CPT (1 << 17)
3458#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
3459#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
3460#define SDE_FDI_RXC_CPT (1 << 8)
3461#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
3462#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
3463#define SDE_FDI_RXB_CPT (1 << 4)
3464#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
3465#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
3466#define SDE_FDI_RXA_CPT (1 << 0)
3467#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
3468 SDE_AUDIO_CP_REQ_B_CPT | \
3469 SDE_AUDIO_CP_REQ_A_CPT)
3470#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
3471 SDE_AUDIO_CP_CHG_B_CPT | \
3472 SDE_AUDIO_CP_CHG_A_CPT)
3473#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
3474 SDE_FDI_RXB_CPT | \
3475 SDE_FDI_RXA_CPT)
3439 3476
3440#define SDEISR 0xc4000 3477#define SDEISR 0xc4000
3441#define SDEIMR 0xc4004 3478#define SDEIMR 0xc4004
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 06721c0e9f98..b3052ef70d16 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6377,17 +6377,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
6377 struct drm_i915_private *dev_priv = dev->dev_private; 6377 struct drm_i915_private *dev_priv = dev->dev_private;
6378 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6378 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6379 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 6379 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
6380 uint32_t plane_bit = 0;
6380 int ret; 6381 int ret;
6381 6382
6382 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6383 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6383 if (ret) 6384 if (ret)
6384 goto err; 6385 goto err;
6385 6386
6387 switch(intel_crtc->plane) {
6388 case PLANE_A:
6389 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
6390 break;
6391 case PLANE_B:
6392 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
6393 break;
6394 case PLANE_C:
6395 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
6396 break;
6397 default:
6398 WARN_ONCE(1, "unknown plane in flip command\n");
6399 ret = -ENODEV;
6400 goto err;
6401 }
6402
6386 ret = intel_ring_begin(ring, 4); 6403 ret = intel_ring_begin(ring, 4);
6387 if (ret) 6404 if (ret)
6388 goto err_unpin; 6405 goto err_unpin;
6389 6406
6390 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); 6407 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
6391 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 6408 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
6392 intel_ring_emit(ring, (obj->gtt_offset)); 6409 intel_ring_emit(ring, (obj->gtt_offset));
6393 intel_ring_emit(ring, (MI_NOOP)); 6410 intel_ring_emit(ring, (MI_NOOP));
@@ -6760,7 +6777,7 @@ static void intel_setup_outputs(struct drm_device *dev)
6760 if (I915_READ(HDMIC) & PORT_DETECTED) 6777 if (I915_READ(HDMIC) & PORT_DETECTED)
6761 intel_hdmi_init(dev, HDMIC); 6778 intel_hdmi_init(dev, HDMIC);
6762 6779
6763 if (I915_READ(HDMID) & PORT_DETECTED) 6780 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
6764 intel_hdmi_init(dev, HDMID); 6781 intel_hdmi_init(dev, HDMID);
6765 6782
6766 if (I915_READ(PCH_DP_C) & DP_DETECTED) 6783 if (I915_READ(PCH_DP_C) & DP_DETECTED)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6538c46fe959..76a708029dcb 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,6 +32,7 @@
32#include "drm.h" 32#include "drm.h"
33#include "drm_crtc.h" 33#include "drm_crtc.h"
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_edid.h"
35#include "intel_drv.h" 36#include "intel_drv.h"
36#include "i915_drm.h" 37#include "i915_drm.h"
37#include "i915_drv.h" 38#include "i915_drv.h"
@@ -67,6 +68,8 @@ struct intel_dp {
67 struct drm_display_mode *panel_fixed_mode; /* for eDP */ 68 struct drm_display_mode *panel_fixed_mode; /* for eDP */
68 struct delayed_work panel_vdd_work; 69 struct delayed_work panel_vdd_work;
69 bool want_panel_vdd; 70 bool want_panel_vdd;
71 struct edid *edid; /* cached EDID for eDP */
72 int edid_mode_count;
70}; 73};
71 74
72/** 75/**
@@ -383,7 +386,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
383 int recv_bytes; 386 int recv_bytes;
384 uint32_t status; 387 uint32_t status;
385 uint32_t aux_clock_divider; 388 uint32_t aux_clock_divider;
386 int try, precharge = 5; 389 int try, precharge;
387 390
388 intel_dp_check_edp(intel_dp); 391 intel_dp_check_edp(intel_dp);
389 /* The clock divider is based off the hrawclk, 392 /* The clock divider is based off the hrawclk,
@@ -403,6 +406,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
403 else 406 else
404 aux_clock_divider = intel_hrawclk(dev) / 2; 407 aux_clock_divider = intel_hrawclk(dev) / 2;
405 408
409 if (IS_GEN6(dev))
410 precharge = 3;
411 else
412 precharge = 5;
413
406 /* Try to wait for any previous AUX channel activity */ 414 /* Try to wait for any previous AUX channel activity */
407 for (try = 0; try < 3; try++) { 415 for (try = 0; try < 3; try++) {
408 status = I915_READ(ch_ctl); 416 status = I915_READ(ch_ctl);
@@ -1980,6 +1988,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
1980 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 1988 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
1981 return; 1989 return;
1982 1990
1991 ironlake_edp_panel_vdd_on(intel_dp);
1992
1983 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 1993 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
1984 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 1994 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
1985 buf[0], buf[1], buf[2]); 1995 buf[0], buf[1], buf[2]);
@@ -1987,6 +1997,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
1987 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) 1997 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
1988 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 1998 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
1989 buf[0], buf[1], buf[2]); 1999 buf[0], buf[1], buf[2]);
2000
2001 ironlake_edp_panel_vdd_off(intel_dp, false);
1990} 2002}
1991 2003
1992static bool 2004static bool
@@ -2121,10 +2133,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2121{ 2133{
2122 struct intel_dp *intel_dp = intel_attached_dp(connector); 2134 struct intel_dp *intel_dp = intel_attached_dp(connector);
2123 struct edid *edid; 2135 struct edid *edid;
2136 int size;
2137
2138 if (is_edp(intel_dp)) {
2139 if (!intel_dp->edid)
2140 return NULL;
2141
2142 size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
2143 edid = kmalloc(size, GFP_KERNEL);
2144 if (!edid)
2145 return NULL;
2146
2147 memcpy(edid, intel_dp->edid, size);
2148 return edid;
2149 }
2124 2150
2125 ironlake_edp_panel_vdd_on(intel_dp);
2126 edid = drm_get_edid(connector, adapter); 2151 edid = drm_get_edid(connector, adapter);
2127 ironlake_edp_panel_vdd_off(intel_dp, false);
2128 return edid; 2152 return edid;
2129} 2153}
2130 2154
@@ -2134,9 +2158,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
2134 struct intel_dp *intel_dp = intel_attached_dp(connector); 2158 struct intel_dp *intel_dp = intel_attached_dp(connector);
2135 int ret; 2159 int ret;
2136 2160
2137 ironlake_edp_panel_vdd_on(intel_dp); 2161 if (is_edp(intel_dp)) {
2162 drm_mode_connector_update_edid_property(connector,
2163 intel_dp->edid);
2164 ret = drm_add_edid_modes(connector, intel_dp->edid);
2165 drm_edid_to_eld(connector,
2166 intel_dp->edid);
2167 connector->display_info.raw_edid = NULL;
2168 return intel_dp->edid_mode_count;
2169 }
2170
2138 ret = intel_ddc_get_modes(connector, adapter); 2171 ret = intel_ddc_get_modes(connector, adapter);
2139 ironlake_edp_panel_vdd_off(intel_dp, false);
2140 return ret; 2172 return ret;
2141} 2173}
2142 2174
@@ -2326,6 +2358,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2326 i2c_del_adapter(&intel_dp->adapter); 2358 i2c_del_adapter(&intel_dp->adapter);
2327 drm_encoder_cleanup(encoder); 2359 drm_encoder_cleanup(encoder);
2328 if (is_edp(intel_dp)) { 2360 if (is_edp(intel_dp)) {
2361 kfree(intel_dp->edid);
2329 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2362 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2330 ironlake_panel_vdd_off_sync(intel_dp); 2363 ironlake_panel_vdd_off_sync(intel_dp);
2331 } 2364 }
@@ -2509,11 +2542,14 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2509 break; 2542 break;
2510 } 2543 }
2511 2544
2545 intel_dp_i2c_init(intel_dp, intel_connector, name);
2546
2512 /* Cache some DPCD data in the eDP case */ 2547 /* Cache some DPCD data in the eDP case */
2513 if (is_edp(intel_dp)) { 2548 if (is_edp(intel_dp)) {
2514 bool ret; 2549 bool ret;
2515 struct edp_power_seq cur, vbt; 2550 struct edp_power_seq cur, vbt;
2516 u32 pp_on, pp_off, pp_div; 2551 u32 pp_on, pp_off, pp_div;
2552 struct edid *edid;
2517 2553
2518 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2554 pp_on = I915_READ(PCH_PP_ON_DELAYS);
2519 pp_off = I915_READ(PCH_PP_OFF_DELAYS); 2555 pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2581,9 +2617,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2581 intel_dp_destroy(&intel_connector->base); 2617 intel_dp_destroy(&intel_connector->base);
2582 return; 2618 return;
2583 } 2619 }
2584 }
2585 2620
2586 intel_dp_i2c_init(intel_dp, intel_connector, name); 2621 ironlake_edp_panel_vdd_on(intel_dp);
2622 edid = drm_get_edid(connector, &intel_dp->adapter);
2623 if (edid) {
2624 drm_mode_connector_update_edid_property(connector,
2625 edid);
2626 intel_dp->edid_mode_count =
2627 drm_add_edid_modes(connector, edid);
2628 drm_edid_to_eld(connector, edid);
2629 intel_dp->edid = edid;
2630 }
2631 ironlake_edp_panel_vdd_off(intel_dp, false);
2632 }
2587 2633
2588 intel_encoder->hot_plug = intel_dp_hot_plug; 2634 intel_encoder->hot_plug = intel_dp_hot_plug;
2589 2635
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7a16f16371e6..f30a53a8917e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -267,10 +267,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
267 267
268static int init_ring_common(struct intel_ring_buffer *ring) 268static int init_ring_common(struct intel_ring_buffer *ring)
269{ 269{
270 drm_i915_private_t *dev_priv = ring->dev->dev_private; 270 struct drm_device *dev = ring->dev;
271 drm_i915_private_t *dev_priv = dev->dev_private;
271 struct drm_i915_gem_object *obj = ring->obj; 272 struct drm_i915_gem_object *obj = ring->obj;
273 int ret = 0;
272 u32 head; 274 u32 head;
273 275
276 if (HAS_FORCE_WAKE(dev))
277 gen6_gt_force_wake_get(dev_priv);
278
274 /* Stop the ring if it's running. */ 279 /* Stop the ring if it's running. */
275 I915_WRITE_CTL(ring, 0); 280 I915_WRITE_CTL(ring, 0);
276 I915_WRITE_HEAD(ring, 0); 281 I915_WRITE_HEAD(ring, 0);
@@ -318,7 +323,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
318 I915_READ_HEAD(ring), 323 I915_READ_HEAD(ring),
319 I915_READ_TAIL(ring), 324 I915_READ_TAIL(ring),
320 I915_READ_START(ring)); 325 I915_READ_START(ring));
321 return -EIO; 326 ret = -EIO;
327 goto out;
322 } 328 }
323 329
324 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 330 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -327,9 +333,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
327 ring->head = I915_READ_HEAD(ring); 333 ring->head = I915_READ_HEAD(ring);
328 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 334 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
329 ring->space = ring_space(ring); 335 ring->space = ring_space(ring);
336 ring->last_retired_head = -1;
330 } 337 }
331 338
332 return 0; 339out:
340 if (HAS_FORCE_WAKE(dev))
341 gen6_gt_force_wake_put(dev_priv);
342
343 return ret;
333} 344}
334 345
335static int 346static int
@@ -1006,6 +1017,10 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1006 if (ret) 1017 if (ret)
1007 goto err_unref; 1018 goto err_unref;
1008 1019
1020 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1021 if (ret)
1022 goto err_unpin;
1023
1009 ring->virtual_start = 1024 ring->virtual_start =
1010 ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, 1025 ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
1011 ring->size); 1026 ring->size);
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 3c8e04f54713..93e832d6c328 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
41 41
42MODULE_DEVICE_TABLE(pci, pciidlist); 42MODULE_DEVICE_TABLE(pci, pciidlist);
43 43
44static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
45{
46 struct apertures_struct *ap;
47 bool primary = false;
48
49 ap = alloc_apertures(1);
50 ap->ranges[0].base = pci_resource_start(pdev, 0);
51 ap->ranges[0].size = pci_resource_len(pdev, 0);
52
53#ifdef CONFIG_X86
54 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
55#endif
56 remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
57 kfree(ap);
58}
59
60
44static int __devinit 61static int __devinit
45mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 62mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
46{ 63{
64 mgag200_kick_out_firmware_fb(pdev);
65
47 return drm_get_pci_dev(pdev, ent, &driver); 66 return drm_get_pci_dev(pdev, ent, &driver);
48} 67}
49 68
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 01d77d1554f4..3904d7964a4b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1149,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1149 } 1149 }
1150 1150
1151 if (tiling_flags & RADEON_TILING_MACRO) { 1151 if (tiling_flags & RADEON_TILING_MACRO) {
1152 if (rdev->family >= CHIP_CAYMAN) 1152 if (rdev->family >= CHIP_TAHITI)
1153 tmp = rdev->config.si.tile_config;
1154 else if (rdev->family >= CHIP_CAYMAN)
1153 tmp = rdev->config.cayman.tile_config; 1155 tmp = rdev->config.cayman.tile_config;
1154 else 1156 else
1155 tmp = rdev->config.evergreen.tile_config; 1157 tmp = rdev->config.evergreen.tile_config;
@@ -1177,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1177 } else if (tiling_flags & RADEON_TILING_MICRO) 1179 } else if (tiling_flags & RADEON_TILING_MICRO)
1178 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1179 1181
1182 if ((rdev->family == CHIP_TAHITI) ||
1183 (rdev->family == CHIP_PITCAIRN))
1184 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1185 else if (rdev->family == CHIP_VERDE)
1186 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1187
1180 switch (radeon_crtc->crtc_id) { 1188 switch (radeon_crtc->crtc_id) {
1181 case 0: 1189 case 0:
1182 WREG32(AVIVO_D1VGA_CONTROL, 0); 1190 WREG32(AVIVO_D1VGA_CONTROL, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e7b1ec5ae8c6..486ccdf4aacd 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1926,7 +1926,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1926 1926
1927 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 1927 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
1928 r600_hdmi_enable(encoder); 1928 r600_hdmi_enable(encoder);
1929 if (ASIC_IS_DCE4(rdev)) 1929 if (ASIC_IS_DCE6(rdev))
1930 ; /* TODO (use pointers instead of if-s?) */
1931 else if (ASIC_IS_DCE4(rdev))
1930 evergreen_hdmi_setmode(encoder, adjusted_mode); 1932 evergreen_hdmi_setmode(encoder, adjusted_mode);
1931 else 1933 else
1932 r600_hdmi_setmode(encoder, adjusted_mode); 1934 r600_hdmi_setmode(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 58991af90502..7fb3d2e0434c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
1029 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 1029 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1030 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 1030 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1031 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 1031 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1032 if ((rdev->family == CHIP_JUNIPER) ||
1033 (rdev->family == CHIP_CYPRESS) ||
1034 (rdev->family == CHIP_HEMLOCK) ||
1035 (rdev->family == CHIP_BARTS))
1036 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
1032 } 1037 }
1033 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 1038 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1034 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 1039 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1553/* 1558/*
1554 * Core functions 1559 * Core functions
1555 */ 1560 */
1556static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1557 u32 num_tile_pipes,
1558 u32 num_backends,
1559 u32 backend_disable_mask)
1560{
1561 u32 backend_map = 0;
1562 u32 enabled_backends_mask = 0;
1563 u32 enabled_backends_count = 0;
1564 u32 cur_pipe;
1565 u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
1566 u32 cur_backend = 0;
1567 u32 i;
1568 bool force_no_swizzle;
1569
1570 if (num_tile_pipes > EVERGREEN_MAX_PIPES)
1571 num_tile_pipes = EVERGREEN_MAX_PIPES;
1572 if (num_tile_pipes < 1)
1573 num_tile_pipes = 1;
1574 if (num_backends > EVERGREEN_MAX_BACKENDS)
1575 num_backends = EVERGREEN_MAX_BACKENDS;
1576 if (num_backends < 1)
1577 num_backends = 1;
1578
1579 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1580 if (((backend_disable_mask >> i) & 1) == 0) {
1581 enabled_backends_mask |= (1 << i);
1582 ++enabled_backends_count;
1583 }
1584 if (enabled_backends_count == num_backends)
1585 break;
1586 }
1587
1588 if (enabled_backends_count == 0) {
1589 enabled_backends_mask = 1;
1590 enabled_backends_count = 1;
1591 }
1592
1593 if (enabled_backends_count != num_backends)
1594 num_backends = enabled_backends_count;
1595
1596 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
1597 switch (rdev->family) {
1598 case CHIP_CEDAR:
1599 case CHIP_REDWOOD:
1600 case CHIP_PALM:
1601 case CHIP_SUMO:
1602 case CHIP_SUMO2:
1603 case CHIP_TURKS:
1604 case CHIP_CAICOS:
1605 force_no_swizzle = false;
1606 break;
1607 case CHIP_CYPRESS:
1608 case CHIP_HEMLOCK:
1609 case CHIP_JUNIPER:
1610 case CHIP_BARTS:
1611 default:
1612 force_no_swizzle = true;
1613 break;
1614 }
1615 if (force_no_swizzle) {
1616 bool last_backend_enabled = false;
1617
1618 force_no_swizzle = false;
1619 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1620 if (((enabled_backends_mask >> i) & 1) == 1) {
1621 if (last_backend_enabled)
1622 force_no_swizzle = true;
1623 last_backend_enabled = true;
1624 } else
1625 last_backend_enabled = false;
1626 }
1627 }
1628
1629 switch (num_tile_pipes) {
1630 case 1:
1631 case 3:
1632 case 5:
1633 case 7:
1634 DRM_ERROR("odd number of pipes!\n");
1635 break;
1636 case 2:
1637 swizzle_pipe[0] = 0;
1638 swizzle_pipe[1] = 1;
1639 break;
1640 case 4:
1641 if (force_no_swizzle) {
1642 swizzle_pipe[0] = 0;
1643 swizzle_pipe[1] = 1;
1644 swizzle_pipe[2] = 2;
1645 swizzle_pipe[3] = 3;
1646 } else {
1647 swizzle_pipe[0] = 0;
1648 swizzle_pipe[1] = 2;
1649 swizzle_pipe[2] = 1;
1650 swizzle_pipe[3] = 3;
1651 }
1652 break;
1653 case 6:
1654 if (force_no_swizzle) {
1655 swizzle_pipe[0] = 0;
1656 swizzle_pipe[1] = 1;
1657 swizzle_pipe[2] = 2;
1658 swizzle_pipe[3] = 3;
1659 swizzle_pipe[4] = 4;
1660 swizzle_pipe[5] = 5;
1661 } else {
1662 swizzle_pipe[0] = 0;
1663 swizzle_pipe[1] = 2;
1664 swizzle_pipe[2] = 4;
1665 swizzle_pipe[3] = 1;
1666 swizzle_pipe[4] = 3;
1667 swizzle_pipe[5] = 5;
1668 }
1669 break;
1670 case 8:
1671 if (force_no_swizzle) {
1672 swizzle_pipe[0] = 0;
1673 swizzle_pipe[1] = 1;
1674 swizzle_pipe[2] = 2;
1675 swizzle_pipe[3] = 3;
1676 swizzle_pipe[4] = 4;
1677 swizzle_pipe[5] = 5;
1678 swizzle_pipe[6] = 6;
1679 swizzle_pipe[7] = 7;
1680 } else {
1681 swizzle_pipe[0] = 0;
1682 swizzle_pipe[1] = 2;
1683 swizzle_pipe[2] = 4;
1684 swizzle_pipe[3] = 6;
1685 swizzle_pipe[4] = 1;
1686 swizzle_pipe[5] = 3;
1687 swizzle_pipe[6] = 5;
1688 swizzle_pipe[7] = 7;
1689 }
1690 break;
1691 }
1692
1693 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1694 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1695 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1696
1697 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
1698
1699 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1700 }
1701
1702 return backend_map;
1703}
1704
1705static void evergreen_gpu_init(struct radeon_device *rdev) 1561static void evergreen_gpu_init(struct radeon_device *rdev)
1706{ 1562{
1707 u32 cc_rb_backend_disable = 0; 1563 u32 gb_addr_config;
1708 u32 cc_gc_shader_pipe_config;
1709 u32 gb_addr_config = 0;
1710 u32 mc_shared_chmap, mc_arb_ramcfg; 1564 u32 mc_shared_chmap, mc_arb_ramcfg;
1711 u32 gb_backend_map;
1712 u32 grbm_gfx_index;
1713 u32 sx_debug_1; 1565 u32 sx_debug_1;
1714 u32 smx_dc_ctl0; 1566 u32 smx_dc_ctl0;
1715 u32 sq_config; 1567 u32 sq_config;
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1724 u32 sq_stack_resource_mgmt_3; 1576 u32 sq_stack_resource_mgmt_3;
1725 u32 vgt_cache_invalidation; 1577 u32 vgt_cache_invalidation;
1726 u32 hdp_host_path_cntl, tmp; 1578 u32 hdp_host_path_cntl, tmp;
1579 u32 disabled_rb_mask;
1727 int i, j, num_shader_engines, ps_thread_count; 1580 int i, j, num_shader_engines, ps_thread_count;
1728 1581
1729 switch (rdev->family) { 1582 switch (rdev->family) {
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1748 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1601 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1749 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1602 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1750 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1603 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1604 gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
1751 break; 1605 break;
1752 case CHIP_JUNIPER: 1606 case CHIP_JUNIPER:
1753 rdev->config.evergreen.num_ses = 1; 1607 rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1769 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1623 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1770 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1624 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1771 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1625 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1626 gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
1772 break; 1627 break;
1773 case CHIP_REDWOOD: 1628 case CHIP_REDWOOD:
1774 rdev->config.evergreen.num_ses = 1; 1629 rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1790 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1645 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1791 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1646 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1792 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1647 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1648 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1793 break; 1649 break;
1794 case CHIP_CEDAR: 1650 case CHIP_CEDAR:
1795 default: 1651 default:
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1812 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1668 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1813 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1669 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1814 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1670 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1671 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
1815 break; 1672 break;
1816 case CHIP_PALM: 1673 case CHIP_PALM:
1817 rdev->config.evergreen.num_ses = 1; 1674 rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1833 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1690 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1834 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1691 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1835 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1692 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1693 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
1836 break; 1694 break;
1837 case CHIP_SUMO: 1695 case CHIP_SUMO:
1838 rdev->config.evergreen.num_ses = 1; 1696 rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1860 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1718 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1861 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1719 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1862 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1720 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1721 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1863 break; 1722 break;
1864 case CHIP_SUMO2: 1723 case CHIP_SUMO2:
1865 rdev->config.evergreen.num_ses = 1; 1724 rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1881 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1740 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1882 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1741 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1883 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1742 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1743 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1884 break; 1744 break;
1885 case CHIP_BARTS: 1745 case CHIP_BARTS:
1886 rdev->config.evergreen.num_ses = 2; 1746 rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1902 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1762 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1903 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1763 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1904 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1764 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1765 gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
1905 break; 1766 break;
1906 case CHIP_TURKS: 1767 case CHIP_TURKS:
1907 rdev->config.evergreen.num_ses = 1; 1768 rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1923 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1784 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1924 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1785 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1925 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1786 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1787 gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
1926 break; 1788 break;
1927 case CHIP_CAICOS: 1789 case CHIP_CAICOS:
1928 rdev->config.evergreen.num_ses = 1; 1790 rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1944 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1806 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1945 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1807 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1946 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1808 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1809 gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
1947 break; 1810 break;
1948 } 1811 }
1949 1812
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1960 1823
1961 evergreen_fix_pci_max_read_req_size(rdev); 1824 evergreen_fix_pci_max_read_req_size(rdev);
1962 1825
1963 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
1964
1965 cc_gc_shader_pipe_config |=
1966 INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
1967 & EVERGREEN_MAX_PIPES_MASK);
1968 cc_gc_shader_pipe_config |=
1969 INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
1970 & EVERGREEN_MAX_SIMDS_MASK);
1971
1972 cc_rb_backend_disable =
1973 BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
1974 & EVERGREEN_MAX_BACKENDS_MASK);
1975
1976
1977 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1826 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1978 if ((rdev->family == CHIP_PALM) || 1827 if ((rdev->family == CHIP_PALM) ||
1979 (rdev->family == CHIP_SUMO) || 1828 (rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1982 else 1831 else
1983 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1832 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1984 1833
1985 switch (rdev->config.evergreen.max_tile_pipes) {
1986 case 1:
1987 default:
1988 gb_addr_config |= NUM_PIPES(0);
1989 break;
1990 case 2:
1991 gb_addr_config |= NUM_PIPES(1);
1992 break;
1993 case 4:
1994 gb_addr_config |= NUM_PIPES(2);
1995 break;
1996 case 8:
1997 gb_addr_config |= NUM_PIPES(3);
1998 break;
1999 }
2000
2001 gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
2002 gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
2003 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
2004 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
2005 gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
2006 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
2007
2008 if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
2009 gb_addr_config |= ROW_SIZE(2);
2010 else
2011 gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
2012
2013 if (rdev->ddev->pdev->device == 0x689e) {
2014 u32 efuse_straps_4;
2015 u32 efuse_straps_3;
2016 u8 efuse_box_bit_131_124;
2017
2018 WREG32(RCU_IND_INDEX, 0x204);
2019 efuse_straps_4 = RREG32(RCU_IND_DATA);
2020 WREG32(RCU_IND_INDEX, 0x203);
2021 efuse_straps_3 = RREG32(RCU_IND_DATA);
2022 efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
2023
2024 switch(efuse_box_bit_131_124) {
2025 case 0x00:
2026 gb_backend_map = 0x76543210;
2027 break;
2028 case 0x55:
2029 gb_backend_map = 0x77553311;
2030 break;
2031 case 0x56:
2032 gb_backend_map = 0x77553300;
2033 break;
2034 case 0x59:
2035 gb_backend_map = 0x77552211;
2036 break;
2037 case 0x66:
2038 gb_backend_map = 0x77443300;
2039 break;
2040 case 0x99:
2041 gb_backend_map = 0x66552211;
2042 break;
2043 case 0x5a:
2044 gb_backend_map = 0x77552200;
2045 break;
2046 case 0xaa:
2047 gb_backend_map = 0x66442200;
2048 break;
2049 case 0x95:
2050 gb_backend_map = 0x66553311;
2051 break;
2052 default:
2053 DRM_ERROR("bad backend map, using default\n");
2054 gb_backend_map =
2055 evergreen_get_tile_pipe_to_backend_map(rdev,
2056 rdev->config.evergreen.max_tile_pipes,
2057 rdev->config.evergreen.max_backends,
2058 ((EVERGREEN_MAX_BACKENDS_MASK <<
2059 rdev->config.evergreen.max_backends) &
2060 EVERGREEN_MAX_BACKENDS_MASK));
2061 break;
2062 }
2063 } else if (rdev->ddev->pdev->device == 0x68b9) {
2064 u32 efuse_straps_3;
2065 u8 efuse_box_bit_127_124;
2066
2067 WREG32(RCU_IND_INDEX, 0x203);
2068 efuse_straps_3 = RREG32(RCU_IND_DATA);
2069 efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
2070
2071 switch(efuse_box_bit_127_124) {
2072 case 0x0:
2073 gb_backend_map = 0x00003210;
2074 break;
2075 case 0x5:
2076 case 0x6:
2077 case 0x9:
2078 case 0xa:
2079 gb_backend_map = 0x00003311;
2080 break;
2081 default:
2082 DRM_ERROR("bad backend map, using default\n");
2083 gb_backend_map =
2084 evergreen_get_tile_pipe_to_backend_map(rdev,
2085 rdev->config.evergreen.max_tile_pipes,
2086 rdev->config.evergreen.max_backends,
2087 ((EVERGREEN_MAX_BACKENDS_MASK <<
2088 rdev->config.evergreen.max_backends) &
2089 EVERGREEN_MAX_BACKENDS_MASK));
2090 break;
2091 }
2092 } else {
2093 switch (rdev->family) {
2094 case CHIP_CYPRESS:
2095 case CHIP_HEMLOCK:
2096 case CHIP_BARTS:
2097 gb_backend_map = 0x66442200;
2098 break;
2099 case CHIP_JUNIPER:
2100 gb_backend_map = 0x00002200;
2101 break;
2102 default:
2103 gb_backend_map =
2104 evergreen_get_tile_pipe_to_backend_map(rdev,
2105 rdev->config.evergreen.max_tile_pipes,
2106 rdev->config.evergreen.max_backends,
2107 ((EVERGREEN_MAX_BACKENDS_MASK <<
2108 rdev->config.evergreen.max_backends) &
2109 EVERGREEN_MAX_BACKENDS_MASK));
2110 }
2111 }
2112
2113 /* setup tiling info dword. gb_addr_config is not adequate since it does 1834 /* setup tiling info dword. gb_addr_config is not adequate since it does
2114 * not have bank info, so create a custom tiling dword. 1835 * not have bank info, so create a custom tiling dword.
2115 * bits 3:0 num_pipes 1836 * bits 3:0 num_pipes
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2136 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 1857 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
2137 if (rdev->flags & RADEON_IS_IGP) 1858 if (rdev->flags & RADEON_IS_IGP)
2138 rdev->config.evergreen.tile_config |= 1 << 4; 1859 rdev->config.evergreen.tile_config |= 1 << 4;
2139 else 1860 else {
2140 rdev->config.evergreen.tile_config |= 1861 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
2141 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1862 rdev->config.evergreen.tile_config |= 1 << 4;
2142 rdev->config.evergreen.tile_config |= 1863 else
2143 ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; 1864 rdev->config.evergreen.tile_config |= 0 << 4;
1865 }
1866 rdev->config.evergreen.tile_config |= 0 << 8;
2144 rdev->config.evergreen.tile_config |= 1867 rdev->config.evergreen.tile_config |=
2145 ((gb_addr_config & 0x30000000) >> 28) << 12; 1868 ((gb_addr_config & 0x30000000) >> 28) << 12;
2146 1869
2147 rdev->config.evergreen.backend_map = gb_backend_map; 1870 num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
2148 WREG32(GB_BACKEND_MAP, gb_backend_map);
2149 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2150 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2151 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2152 1871
2153 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; 1872 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
2154 grbm_gfx_index = INSTANCE_BROADCAST_WRITES; 1873 u32 efuse_straps_4;
2155 1874 u32 efuse_straps_3;
2156 for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
2157 u32 rb = cc_rb_backend_disable | (0xf0 << 16);
2158 u32 sp = cc_gc_shader_pipe_config;
2159 u32 gfx = grbm_gfx_index | SE_INDEX(i);
2160 1875
2161 if (i == num_shader_engines) { 1876 WREG32(RCU_IND_INDEX, 0x204);
2162 rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); 1877 efuse_straps_4 = RREG32(RCU_IND_DATA);
2163 sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); 1878 WREG32(RCU_IND_INDEX, 0x203);
1879 efuse_straps_3 = RREG32(RCU_IND_DATA);
1880 tmp = (((efuse_straps_4 & 0xf) << 4) |
1881 ((efuse_straps_3 & 0xf0000000) >> 28));
1882 } else {
1883 tmp = 0;
1884 for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
1885 u32 rb_disable_bitmap;
1886
1887 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1888 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1889 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
1890 tmp <<= 4;
1891 tmp |= rb_disable_bitmap;
2164 } 1892 }
1893 }
1894 /* enabled rb are just the one not disabled :) */
1895 disabled_rb_mask = tmp;
2165 1896
2166 WREG32(GRBM_GFX_INDEX, gfx); 1897 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2167 WREG32(RLC_GFX_INDEX, gfx); 1898 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2168 1899
2169 WREG32(CC_RB_BACKEND_DISABLE, rb); 1900 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2170 WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); 1901 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2171 WREG32(GC_USER_RB_BACKEND_DISABLE, rb); 1902 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2172 WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
2173 }
2174 1903
2175 grbm_gfx_index |= SE_BROADCAST_WRITES; 1904 tmp = gb_addr_config & NUM_PIPES_MASK;
2176 WREG32(GRBM_GFX_INDEX, grbm_gfx_index); 1905 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
2177 WREG32(RLC_GFX_INDEX, grbm_gfx_index); 1906 EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
1907 WREG32(GB_BACKEND_MAP, tmp);
2178 1908
2179 WREG32(CGTS_SYS_TCC_DISABLE, 0); 1909 WREG32(CGTS_SYS_TCC_DISABLE, 0);
2180 WREG32(CGTS_TCC_DISABLE, 0); 1910 WREG32(CGTS_TCC_DISABLE, 0);
@@ -2202,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2202 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); 1932 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2203 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 1933 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2204 1934
1935 if (rdev->family <= CHIP_SUMO2)
1936 WREG32(SMX_SAR_CTL0, 0x00010000);
1937
2205 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | 1938 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2206 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | 1939 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2207 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); 1940 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 4e7dd2b4843d..c16554122ccd 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -52,6 +52,7 @@ struct evergreen_cs_track {
52 u32 cb_color_view[12]; 52 u32 cb_color_view[12];
53 u32 cb_color_pitch[12]; 53 u32 cb_color_pitch[12];
54 u32 cb_color_slice[12]; 54 u32 cb_color_slice[12];
55 u32 cb_color_slice_idx[12];
55 u32 cb_color_attrib[12]; 56 u32 cb_color_attrib[12];
56 u32 cb_color_cmask_slice[8];/* unused */ 57 u32 cb_color_cmask_slice[8];/* unused */
57 u32 cb_color_fmask_slice[8];/* unused */ 58 u32 cb_color_fmask_slice[8];/* unused */
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
127 track->cb_color_info[i] = 0; 128 track->cb_color_info[i] = 0;
128 track->cb_color_view[i] = 0xFFFFFFFF; 129 track->cb_color_view[i] = 0xFFFFFFFF;
129 track->cb_color_pitch[i] = 0; 130 track->cb_color_pitch[i] = 0;
130 track->cb_color_slice[i] = 0; 131 track->cb_color_slice[i] = 0xfffffff;
132 track->cb_color_slice_idx[i] = 0;
131 } 133 }
132 track->cb_target_mask = 0xFFFFFFFF; 134 track->cb_target_mask = 0xFFFFFFFF;
133 track->cb_shader_mask = 0xFFFFFFFF; 135 track->cb_shader_mask = 0xFFFFFFFF;
134 track->cb_dirty = true; 136 track->cb_dirty = true;
135 137
138 track->db_depth_slice = 0xffffffff;
136 track->db_depth_view = 0xFFFFC000; 139 track->db_depth_view = 0xFFFFC000;
137 track->db_depth_size = 0xFFFFFFFF; 140 track->db_depth_size = 0xFFFFFFFF;
138 track->db_depth_control = 0xFFFFFFFF; 141 track->db_depth_control = 0xFFFFFFFF;
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
250{ 253{
251 struct evergreen_cs_track *track = p->track; 254 struct evergreen_cs_track *track = p->track;
252 unsigned palign, halign, tileb, slice_pt; 255 unsigned palign, halign, tileb, slice_pt;
256 unsigned mtile_pr, mtile_ps, mtileb;
253 257
254 tileb = 64 * surf->bpe * surf->nsamples; 258 tileb = 64 * surf->bpe * surf->nsamples;
255 palign = track->group_size / (8 * surf->bpe * surf->nsamples);
256 palign = MAX(8, palign);
257 slice_pt = 1; 259 slice_pt = 1;
258 if (tileb > surf->tsplit) { 260 if (tileb > surf->tsplit) {
259 slice_pt = tileb / surf->tsplit; 261 slice_pt = tileb / surf->tsplit;
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
262 /* macro tile width & height */ 264 /* macro tile width & height */
263 palign = (8 * surf->bankw * track->npipes) * surf->mtilea; 265 palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
264 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; 266 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
265 surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt; 267 mtileb = (palign / 8) * (halign / 8) * tileb;;
268 mtile_pr = surf->nbx / palign;
269 mtile_ps = (mtile_pr * surf->nby) / halign;
270 surf->layer_size = mtile_ps * mtileb * slice_pt;
266 surf->base_align = (palign / 8) * (halign / 8) * tileb; 271 surf->base_align = (palign / 8) * (halign / 8) * tileb;
267 surf->palign = palign; 272 surf->palign = palign;
268 surf->halign = halign; 273 surf->halign = halign;
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
434 439
435 offset += surf.layer_size * mslice; 440 offset += surf.layer_size * mslice;
436 if (offset > radeon_bo_size(track->cb_color_bo[id])) { 441 if (offset > radeon_bo_size(track->cb_color_bo[id])) {
442 /* old ddx are broken they allocate bo with w*h*bpp but
443 * program slice with ALIGN(h, 8), catch this and patch
444 * command stream.
445 */
446 if (!surf.mode) {
447 volatile u32 *ib = p->ib.ptr;
448 unsigned long tmp, nby, bsize, size, min = 0;
449
450 /* find the height the ddx wants */
451 if (surf.nby > 8) {
452 min = surf.nby - 8;
453 }
454 bsize = radeon_bo_size(track->cb_color_bo[id]);
455 tmp = track->cb_color_bo_offset[id] << 8;
456 for (nby = surf.nby; nby > min; nby--) {
457 size = nby * surf.nbx * surf.bpe * surf.nsamples;
458 if ((tmp + size * mslice) <= bsize) {
459 break;
460 }
461 }
462 if (nby > min) {
463 surf.nby = nby;
464 slice = ((nby * surf.nbx) / 64) - 1;
465 if (!evergreen_surface_check(p, &surf, "cb")) {
466 /* check if this one works */
467 tmp += surf.layer_size * mslice;
468 if (tmp <= bsize) {
469 ib[track->cb_color_slice_idx[id]] = slice;
470 goto old_ddx_ok;
471 }
472 }
473 }
474 }
437 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " 475 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
438 "offset %d, max layer %d, bo size %ld, slice %d)\n", 476 "offset %d, max layer %d, bo size %ld, slice %d)\n",
439 __func__, __LINE__, id, surf.layer_size, 477 __func__, __LINE__, id, surf.layer_size,
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
446 surf.tsplit, surf.mtilea); 484 surf.tsplit, surf.mtilea);
447 return -EINVAL; 485 return -EINVAL;
448 } 486 }
487old_ddx_ok:
449 488
450 return 0; 489 return 0;
451} 490}
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1532 case CB_COLOR7_SLICE: 1571 case CB_COLOR7_SLICE:
1533 tmp = (reg - CB_COLOR0_SLICE) / 0x3c; 1572 tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
1534 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); 1573 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1574 track->cb_color_slice_idx[tmp] = idx;
1535 track->cb_dirty = true; 1575 track->cb_dirty = true;
1536 break; 1576 break;
1537 case CB_COLOR8_SLICE: 1577 case CB_COLOR8_SLICE:
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1540 case CB_COLOR11_SLICE: 1580 case CB_COLOR11_SLICE:
1541 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; 1581 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
1542 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); 1582 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1583 track->cb_color_slice_idx[tmp] = idx;
1543 track->cb_dirty = true; 1584 track->cb_dirty = true;
1544 break; 1585 break;
1545 case CB_COLOR0_ATTRIB: 1586 case CB_COLOR0_ATTRIB:
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index a51f880985f8..65c54160028b 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -156,9 +156,6 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
156 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 156 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
157 uint32_t offset; 157 uint32_t offset;
158 158
159 if (ASIC_IS_DCE5(rdev))
160 return;
161
162 /* Silent, r600_hdmi_enable will raise WARN for us */ 159 /* Silent, r600_hdmi_enable will raise WARN for us */
163 if (!dig->afmt->enabled) 160 if (!dig->afmt->enabled)
164 return; 161 return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79130bfd1d6f..b50b15c70498 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -37,6 +37,15 @@
37#define EVERGREEN_MAX_PIPES_MASK 0xFF 37#define EVERGREEN_MAX_PIPES_MASK 0xFF
38#define EVERGREEN_MAX_LDS_NUM 0xFFFF 38#define EVERGREEN_MAX_LDS_NUM 0xFFFF
39 39
40#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
41#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
42#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
43#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
44#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
45#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
46#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
47#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
48
40/* Registers */ 49/* Registers */
41 50
42#define RCU_IND_INDEX 0x100 51#define RCU_IND_INDEX 0x100
@@ -54,6 +63,7 @@
54#define BACKEND_DISABLE(x) ((x) << 16) 63#define BACKEND_DISABLE(x) ((x) << 16)
55#define GB_ADDR_CONFIG 0x98F8 64#define GB_ADDR_CONFIG 0x98F8
56#define NUM_PIPES(x) ((x) << 0) 65#define NUM_PIPES(x) ((x) << 0)
66#define NUM_PIPES_MASK 0x0000000f
57#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) 67#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
58#define BANK_INTERLEAVE_SIZE(x) ((x) << 8) 68#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
59#define NUM_SHADER_ENGINES(x) ((x) << 12) 69#define NUM_SHADER_ENGINES(x) ((x) << 12)
@@ -452,6 +462,7 @@
452#define MC_VM_MD_L1_TLB0_CNTL 0x2654 462#define MC_VM_MD_L1_TLB0_CNTL 0x2654
453#define MC_VM_MD_L1_TLB1_CNTL 0x2658 463#define MC_VM_MD_L1_TLB1_CNTL 0x2658
454#define MC_VM_MD_L1_TLB2_CNTL 0x265C 464#define MC_VM_MD_L1_TLB2_CNTL 0x265C
465#define MC_VM_MD_L1_TLB3_CNTL 0x2698
455 466
456#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C 467#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
457#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 468#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
@@ -492,6 +503,7 @@
492#define SCRATCH_UMSK 0x8540 503#define SCRATCH_UMSK 0x8540
493#define SCRATCH_ADDR 0x8544 504#define SCRATCH_ADDR 0x8544
494 505
506#define SMX_SAR_CTL0 0xA008
495#define SMX_DC_CTL0 0xA020 507#define SMX_DC_CTL0 0xA020
496#define USE_HASH_FUNCTION (1 << 0) 508#define USE_HASH_FUNCTION (1 << 0)
497#define NUMBER_OF_SETS(x) ((x) << 1) 509#define NUMBER_OF_SETS(x) ((x) << 1)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ce4e7cc6c905..b7bf18e40215 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,215 +417,17 @@ out:
417/* 417/*
418 * Core functions 418 * Core functions
419 */ 419 */
420static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
421 u32 num_tile_pipes,
422 u32 num_backends_per_asic,
423 u32 *backend_disable_mask_per_asic,
424 u32 num_shader_engines)
425{
426 u32 backend_map = 0;
427 u32 enabled_backends_mask = 0;
428 u32 enabled_backends_count = 0;
429 u32 num_backends_per_se;
430 u32 cur_pipe;
431 u32 swizzle_pipe[CAYMAN_MAX_PIPES];
432 u32 cur_backend = 0;
433 u32 i;
434 bool force_no_swizzle;
435
436 /* force legal values */
437 if (num_tile_pipes < 1)
438 num_tile_pipes = 1;
439 if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
440 num_tile_pipes = rdev->config.cayman.max_tile_pipes;
441 if (num_shader_engines < 1)
442 num_shader_engines = 1;
443 if (num_shader_engines > rdev->config.cayman.max_shader_engines)
444 num_shader_engines = rdev->config.cayman.max_shader_engines;
445 if (num_backends_per_asic < num_shader_engines)
446 num_backends_per_asic = num_shader_engines;
447 if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
448 num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
449
450 /* make sure we have the same number of backends per se */
451 num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
452 /* set up the number of backends per se */
453 num_backends_per_se = num_backends_per_asic / num_shader_engines;
454 if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
455 num_backends_per_se = rdev->config.cayman.max_backends_per_se;
456 num_backends_per_asic = num_backends_per_se * num_shader_engines;
457 }
458
459 /* create enable mask and count for enabled backends */
460 for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
461 if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
462 enabled_backends_mask |= (1 << i);
463 ++enabled_backends_count;
464 }
465 if (enabled_backends_count == num_backends_per_asic)
466 break;
467 }
468
469 /* force the backends mask to match the current number of backends */
470 if (enabled_backends_count != num_backends_per_asic) {
471 u32 this_backend_enabled;
472 u32 shader_engine;
473 u32 backend_per_se;
474
475 enabled_backends_mask = 0;
476 enabled_backends_count = 0;
477 *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
478 for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
479 /* calc the current se */
480 shader_engine = i / rdev->config.cayman.max_backends_per_se;
481 /* calc the backend per se */
482 backend_per_se = i % rdev->config.cayman.max_backends_per_se;
483 /* default to not enabled */
484 this_backend_enabled = 0;
485 if ((shader_engine < num_shader_engines) &&
486 (backend_per_se < num_backends_per_se))
487 this_backend_enabled = 1;
488 if (this_backend_enabled) {
489 enabled_backends_mask |= (1 << i);
490 *backend_disable_mask_per_asic &= ~(1 << i);
491 ++enabled_backends_count;
492 }
493 }
494 }
495
496
497 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
498 switch (rdev->family) {
499 case CHIP_CAYMAN:
500 case CHIP_ARUBA:
501 force_no_swizzle = true;
502 break;
503 default:
504 force_no_swizzle = false;
505 break;
506 }
507 if (force_no_swizzle) {
508 bool last_backend_enabled = false;
509
510 force_no_swizzle = false;
511 for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
512 if (((enabled_backends_mask >> i) & 1) == 1) {
513 if (last_backend_enabled)
514 force_no_swizzle = true;
515 last_backend_enabled = true;
516 } else
517 last_backend_enabled = false;
518 }
519 }
520
521 switch (num_tile_pipes) {
522 case 1:
523 case 3:
524 case 5:
525 case 7:
526 DRM_ERROR("odd number of pipes!\n");
527 break;
528 case 2:
529 swizzle_pipe[0] = 0;
530 swizzle_pipe[1] = 1;
531 break;
532 case 4:
533 if (force_no_swizzle) {
534 swizzle_pipe[0] = 0;
535 swizzle_pipe[1] = 1;
536 swizzle_pipe[2] = 2;
537 swizzle_pipe[3] = 3;
538 } else {
539 swizzle_pipe[0] = 0;
540 swizzle_pipe[1] = 2;
541 swizzle_pipe[2] = 1;
542 swizzle_pipe[3] = 3;
543 }
544 break;
545 case 6:
546 if (force_no_swizzle) {
547 swizzle_pipe[0] = 0;
548 swizzle_pipe[1] = 1;
549 swizzle_pipe[2] = 2;
550 swizzle_pipe[3] = 3;
551 swizzle_pipe[4] = 4;
552 swizzle_pipe[5] = 5;
553 } else {
554 swizzle_pipe[0] = 0;
555 swizzle_pipe[1] = 2;
556 swizzle_pipe[2] = 4;
557 swizzle_pipe[3] = 1;
558 swizzle_pipe[4] = 3;
559 swizzle_pipe[5] = 5;
560 }
561 break;
562 case 8:
563 if (force_no_swizzle) {
564 swizzle_pipe[0] = 0;
565 swizzle_pipe[1] = 1;
566 swizzle_pipe[2] = 2;
567 swizzle_pipe[3] = 3;
568 swizzle_pipe[4] = 4;
569 swizzle_pipe[5] = 5;
570 swizzle_pipe[6] = 6;
571 swizzle_pipe[7] = 7;
572 } else {
573 swizzle_pipe[0] = 0;
574 swizzle_pipe[1] = 2;
575 swizzle_pipe[2] = 4;
576 swizzle_pipe[3] = 6;
577 swizzle_pipe[4] = 1;
578 swizzle_pipe[5] = 3;
579 swizzle_pipe[6] = 5;
580 swizzle_pipe[7] = 7;
581 }
582 break;
583 }
584
585 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
586 while (((1 << cur_backend) & enabled_backends_mask) == 0)
587 cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
588
589 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
590
591 cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
592 }
593
594 return backend_map;
595}
596
597static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
598 u32 disable_mask_per_se,
599 u32 max_disable_mask_per_se,
600 u32 num_shader_engines)
601{
602 u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
603 u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
604
605 if (num_shader_engines == 1)
606 return disable_mask_per_asic;
607 else if (num_shader_engines == 2)
608 return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
609 else
610 return 0xffffffff;
611}
612
613static void cayman_gpu_init(struct radeon_device *rdev) 420static void cayman_gpu_init(struct radeon_device *rdev)
614{ 421{
615 u32 cc_rb_backend_disable = 0;
616 u32 cc_gc_shader_pipe_config;
617 u32 gb_addr_config = 0; 422 u32 gb_addr_config = 0;
618 u32 mc_shared_chmap, mc_arb_ramcfg; 423 u32 mc_shared_chmap, mc_arb_ramcfg;
619 u32 gb_backend_map;
620 u32 cgts_tcc_disable; 424 u32 cgts_tcc_disable;
621 u32 sx_debug_1; 425 u32 sx_debug_1;
622 u32 smx_dc_ctl0; 426 u32 smx_dc_ctl0;
623 u32 gc_user_shader_pipe_config;
624 u32 gc_user_rb_backend_disable;
625 u32 cgts_user_tcc_disable;
626 u32 cgts_sm_ctrl_reg; 427 u32 cgts_sm_ctrl_reg;
627 u32 hdp_host_path_cntl; 428 u32 hdp_host_path_cntl;
628 u32 tmp; 429 u32 tmp;
430 u32 disabled_rb_mask;
629 int i, j; 431 int i, j;
630 432
631 switch (rdev->family) { 433 switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
650 rdev->config.cayman.sc_prim_fifo_size = 0x100; 452 rdev->config.cayman.sc_prim_fifo_size = 0x100;
651 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; 453 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
652 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; 454 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
455 gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
653 break; 456 break;
654 case CHIP_ARUBA: 457 case CHIP_ARUBA:
655 default: 458 default:
@@ -657,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
657 rdev->config.cayman.max_pipes_per_simd = 4; 460 rdev->config.cayman.max_pipes_per_simd = 4;
658 rdev->config.cayman.max_tile_pipes = 2; 461 rdev->config.cayman.max_tile_pipes = 2;
659 if ((rdev->pdev->device == 0x9900) || 462 if ((rdev->pdev->device == 0x9900) ||
660 (rdev->pdev->device == 0x9901)) { 463 (rdev->pdev->device == 0x9901) ||
464 (rdev->pdev->device == 0x9905) ||
465 (rdev->pdev->device == 0x9906) ||
466 (rdev->pdev->device == 0x9907) ||
467 (rdev->pdev->device == 0x9908) ||
468 (rdev->pdev->device == 0x9909) ||
469 (rdev->pdev->device == 0x9910) ||
470 (rdev->pdev->device == 0x9917)) {
661 rdev->config.cayman.max_simds_per_se = 6; 471 rdev->config.cayman.max_simds_per_se = 6;
662 rdev->config.cayman.max_backends_per_se = 2; 472 rdev->config.cayman.max_backends_per_se = 2;
663 } else if ((rdev->pdev->device == 0x9903) || 473 } else if ((rdev->pdev->device == 0x9903) ||
664 (rdev->pdev->device == 0x9904)) { 474 (rdev->pdev->device == 0x9904) ||
475 (rdev->pdev->device == 0x990A) ||
476 (rdev->pdev->device == 0x9913) ||
477 (rdev->pdev->device == 0x9918)) {
665 rdev->config.cayman.max_simds_per_se = 4; 478 rdev->config.cayman.max_simds_per_se = 4;
666 rdev->config.cayman.max_backends_per_se = 2; 479 rdev->config.cayman.max_backends_per_se = 2;
667 } else if ((rdev->pdev->device == 0x9990) || 480 } else if ((rdev->pdev->device == 0x9919) ||
668 (rdev->pdev->device == 0x9991)) { 481 (rdev->pdev->device == 0x9990) ||
482 (rdev->pdev->device == 0x9991) ||
483 (rdev->pdev->device == 0x9994) ||
484 (rdev->pdev->device == 0x99A0)) {
669 rdev->config.cayman.max_simds_per_se = 3; 485 rdev->config.cayman.max_simds_per_se = 3;
670 rdev->config.cayman.max_backends_per_se = 1; 486 rdev->config.cayman.max_backends_per_se = 1;
671 } else { 487 } else {
@@ -687,6 +503,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
687 rdev->config.cayman.sc_prim_fifo_size = 0x40; 503 rdev->config.cayman.sc_prim_fifo_size = 0x40;
688 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; 504 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
689 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; 505 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
506 gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
690 break; 507 break;
691 } 508 }
692 509
@@ -706,39 +523,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
706 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 523 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
707 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 524 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
708 525
709 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
710 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
711 cgts_tcc_disable = 0xffff0000;
712 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
713 cgts_tcc_disable &= ~(1 << (16 + i));
714 gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
715 gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
716 cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
717
718 rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
719 tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
720 rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
721 rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
722 tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
723 rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
724 tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
725 rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
726 tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
727 rdev->config.cayman.backend_disable_mask_per_asic =
728 cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
729 rdev->config.cayman.num_shader_engines);
730 rdev->config.cayman.backend_map =
731 cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
732 rdev->config.cayman.num_backends_per_se *
733 rdev->config.cayman.num_shader_engines,
734 &rdev->config.cayman.backend_disable_mask_per_asic,
735 rdev->config.cayman.num_shader_engines);
736 tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
737 rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
738 tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
739 rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
740 if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
741 rdev->config.cayman.mem_max_burst_length_bytes = 512;
742 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; 526 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
743 rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 527 rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
744 if (rdev->config.cayman.mem_row_size_in_kb > 4) 528 if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +532,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
748 rdev->config.cayman.num_gpus = 1; 532 rdev->config.cayman.num_gpus = 1;
749 rdev->config.cayman.multi_gpu_tile_size = 64; 533 rdev->config.cayman.multi_gpu_tile_size = 64;
750 534
751 //gb_addr_config = 0x02011003
752#if 0
753 gb_addr_config = RREG32(GB_ADDR_CONFIG);
754#else
755 gb_addr_config = 0;
756 switch (rdev->config.cayman.num_tile_pipes) {
757 case 1:
758 default:
759 gb_addr_config |= NUM_PIPES(0);
760 break;
761 case 2:
762 gb_addr_config |= NUM_PIPES(1);
763 break;
764 case 4:
765 gb_addr_config |= NUM_PIPES(2);
766 break;
767 case 8:
768 gb_addr_config |= NUM_PIPES(3);
769 break;
770 }
771
772 tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
773 gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
774 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
775 tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
776 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
777 switch (rdev->config.cayman.num_gpus) {
778 case 1:
779 default:
780 gb_addr_config |= NUM_GPUS(0);
781 break;
782 case 2:
783 gb_addr_config |= NUM_GPUS(1);
784 break;
785 case 4:
786 gb_addr_config |= NUM_GPUS(2);
787 break;
788 }
789 switch (rdev->config.cayman.multi_gpu_tile_size) {
790 case 16:
791 gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
792 break;
793 case 32:
794 default:
795 gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
796 break;
797 case 64:
798 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
799 break;
800 case 128:
801 gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
802 break;
803 }
804 switch (rdev->config.cayman.mem_row_size_in_kb) {
805 case 1:
806 default:
807 gb_addr_config |= ROW_SIZE(0);
808 break;
809 case 2:
810 gb_addr_config |= ROW_SIZE(1);
811 break;
812 case 4:
813 gb_addr_config |= ROW_SIZE(2);
814 break;
815 }
816#endif
817
818 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; 535 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
819 rdev->config.cayman.num_tile_pipes = (1 << tmp); 536 rdev->config.cayman.num_tile_pipes = (1 << tmp);
820 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; 537 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +545,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
828 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; 545 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
829 rdev->config.cayman.mem_row_size_in_kb = 1 << tmp; 546 rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
830 547
831 //gb_backend_map = 0x76541032; 548
832#if 0
833 gb_backend_map = RREG32(GB_BACKEND_MAP);
834#else
835 gb_backend_map =
836 cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
837 rdev->config.cayman.num_backends_per_se *
838 rdev->config.cayman.num_shader_engines,
839 &rdev->config.cayman.backend_disable_mask_per_asic,
840 rdev->config.cayman.num_shader_engines);
841#endif
842 /* setup tiling info dword. gb_addr_config is not adequate since it does 549 /* setup tiling info dword. gb_addr_config is not adequate since it does
843 * not have bank info, so create a custom tiling dword. 550 * not have bank info, so create a custom tiling dword.
844 * bits 3:0 num_pipes 551 * bits 3:0 num_pipes
@@ -866,33 +573,49 @@ static void cayman_gpu_init(struct radeon_device *rdev)
866 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 573 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
867 if (rdev->flags & RADEON_IS_IGP) 574 if (rdev->flags & RADEON_IS_IGP)
868 rdev->config.cayman.tile_config |= 1 << 4; 575 rdev->config.cayman.tile_config |= 1 << 4;
869 else 576 else {
870 rdev->config.cayman.tile_config |= 577 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
871 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 578 rdev->config.cayman.tile_config |= 1 << 4;
579 else
580 rdev->config.cayman.tile_config |= 0 << 4;
581 }
872 rdev->config.cayman.tile_config |= 582 rdev->config.cayman.tile_config |=
873 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 583 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
874 rdev->config.cayman.tile_config |= 584 rdev->config.cayman.tile_config |=
875 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 585 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
876 586
877 rdev->config.cayman.backend_map = gb_backend_map; 587 tmp = 0;
878 WREG32(GB_BACKEND_MAP, gb_backend_map); 588 for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
589 u32 rb_disable_bitmap;
590
591 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
592 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
593 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
594 tmp <<= 4;
595 tmp |= rb_disable_bitmap;
596 }
597 /* enabled rb are just the one not disabled :) */
598 disabled_rb_mask = tmp;
599
600 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
601 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
602
879 WREG32(GB_ADDR_CONFIG, gb_addr_config); 603 WREG32(GB_ADDR_CONFIG, gb_addr_config);
880 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 604 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
881 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 605 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
882 606
883 /* primary versions */ 607 tmp = gb_addr_config & NUM_PIPES_MASK;
884 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 608 tmp = r6xx_remap_render_backend(rdev, tmp,
885 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 609 rdev->config.cayman.max_backends_per_se *
886 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 610 rdev->config.cayman.max_shader_engines,
611 CAYMAN_MAX_BACKENDS, disabled_rb_mask);
612 WREG32(GB_BACKEND_MAP, tmp);
887 613
614 cgts_tcc_disable = 0xffff0000;
615 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
616 cgts_tcc_disable &= ~(1 << (16 + i));
888 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); 617 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
889 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable); 618 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
890
891 /* user versions */
892 WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
893 WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
894 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
895
896 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable); 619 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
897 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 620 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
898 621
@@ -1580,6 +1303,10 @@ static int cayman_startup(struct radeon_device *rdev)
1580 if (r) 1303 if (r)
1581 return r; 1304 return r;
1582 1305
1306 r = r600_audio_init(rdev);
1307 if (r)
1308 return r;
1309
1583 return 0; 1310 return 0;
1584} 1311}
1585 1312
@@ -1606,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev)
1606 1333
1607int cayman_suspend(struct radeon_device *rdev) 1334int cayman_suspend(struct radeon_device *rdev)
1608{ 1335{
1336 r600_audio_fini(rdev);
1609 /* FIXME: we should wait for ring to be empty */ 1337 /* FIXME: we should wait for ring to be empty */
1610 radeon_ib_pool_suspend(rdev); 1338 radeon_ib_pool_suspend(rdev);
1611 radeon_vm_manager_suspend(rdev); 1339 radeon_vm_manager_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046ada56..a0b98066e207 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -41,6 +41,9 @@
41#define CAYMAN_MAX_TCC 16 41#define CAYMAN_MAX_TCC 16
42#define CAYMAN_MAX_TCC_MASK 0xFF 42#define CAYMAN_MAX_TCC_MASK 0xFF
43 43
44#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
45#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
46
44#define DMIF_ADDR_CONFIG 0xBD4 47#define DMIF_ADDR_CONFIG 0xBD4
45#define SRBM_GFX_CNTL 0x0E44 48#define SRBM_GFX_CNTL 0x0E44
46#define RINGID(x) (((x) & 0x3) << 0) 49#define RINGID(x) (((x) & 0x3) << 0)
@@ -148,6 +151,8 @@
148#define CGTS_SYS_TCC_DISABLE 0x3F90 151#define CGTS_SYS_TCC_DISABLE 0x3F90
149#define CGTS_USER_SYS_TCC_DISABLE 0x3F94 152#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
150 153
154#define RLC_GFX_INDEX 0x3FC4
155
151#define CONFIG_MEMSIZE 0x5428 156#define CONFIG_MEMSIZE 0x5428
152 157
153#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 158#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
@@ -212,6 +217,12 @@
212#define SOFT_RESET_VGT (1 << 14) 217#define SOFT_RESET_VGT (1 << 14)
213#define SOFT_RESET_IA (1 << 15) 218#define SOFT_RESET_IA (1 << 15)
214 219
220#define GRBM_GFX_INDEX 0x802C
221#define INSTANCE_INDEX(x) ((x) << 0)
222#define SE_INDEX(x) ((x) << 16)
223#define INSTANCE_BROADCAST_WRITES (1 << 30)
224#define SE_BROADCAST_WRITES (1 << 31)
225
215#define SCRATCH_REG0 0x8500 226#define SCRATCH_REG0 0x8500
216#define SCRATCH_REG1 0x8504 227#define SCRATCH_REG1 0x8504
217#define SCRATCH_REG2 0x8508 228#define SCRATCH_REG2 0x8508
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f388a1d73b63..bff627293812 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
1376 return r600_gpu_soft_reset(rdev); 1376 return r600_gpu_soft_reset(rdev);
1377} 1377}
1378 1378
1379static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 1379u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1380 u32 num_backends, 1380 u32 tiling_pipe_num,
1381 u32 backend_disable_mask) 1381 u32 max_rb_num,
1382{ 1382 u32 total_max_rb_num,
1383 u32 backend_map = 0; 1383 u32 disabled_rb_mask)
1384 u32 enabled_backends_mask; 1384{
1385 u32 enabled_backends_count; 1385 u32 rendering_pipe_num, rb_num_width, req_rb_num;
1386 u32 cur_pipe; 1386 u32 pipe_rb_ratio, pipe_rb_remain;
1387 u32 swizzle_pipe[R6XX_MAX_PIPES]; 1387 u32 data = 0, mask = 1 << (max_rb_num - 1);
1388 u32 cur_backend; 1388 unsigned i, j;
1389 u32 i; 1389
1390 1390 /* mask out the RBs that don't exist on that asic */
1391 if (num_tile_pipes > R6XX_MAX_PIPES) 1391 disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
1392 num_tile_pipes = R6XX_MAX_PIPES; 1392
1393 if (num_tile_pipes < 1) 1393 rendering_pipe_num = 1 << tiling_pipe_num;
1394 num_tile_pipes = 1; 1394 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1395 if (num_backends > R6XX_MAX_BACKENDS) 1395 BUG_ON(rendering_pipe_num < req_rb_num);
1396 num_backends = R6XX_MAX_BACKENDS; 1396
1397 if (num_backends < 1) 1397 pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1398 num_backends = 1; 1398 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1399 1399
1400 enabled_backends_mask = 0; 1400 if (rdev->family <= CHIP_RV740) {
1401 enabled_backends_count = 0; 1401 /* r6xx/r7xx */
1402 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) { 1402 rb_num_width = 2;
1403 if (((backend_disable_mask >> i) & 1) == 0) { 1403 } else {
1404 enabled_backends_mask |= (1 << i); 1404 /* eg+ */
1405 ++enabled_backends_count; 1405 rb_num_width = 4;
1406 }
1407 if (enabled_backends_count == num_backends)
1408 break;
1409 }
1410
1411 if (enabled_backends_count == 0) {
1412 enabled_backends_mask = 1;
1413 enabled_backends_count = 1;
1414 }
1415
1416 if (enabled_backends_count != num_backends)
1417 num_backends = enabled_backends_count;
1418
1419 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1420 switch (num_tile_pipes) {
1421 case 1:
1422 swizzle_pipe[0] = 0;
1423 break;
1424 case 2:
1425 swizzle_pipe[0] = 0;
1426 swizzle_pipe[1] = 1;
1427 break;
1428 case 3:
1429 swizzle_pipe[0] = 0;
1430 swizzle_pipe[1] = 1;
1431 swizzle_pipe[2] = 2;
1432 break;
1433 case 4:
1434 swizzle_pipe[0] = 0;
1435 swizzle_pipe[1] = 1;
1436 swizzle_pipe[2] = 2;
1437 swizzle_pipe[3] = 3;
1438 break;
1439 case 5:
1440 swizzle_pipe[0] = 0;
1441 swizzle_pipe[1] = 1;
1442 swizzle_pipe[2] = 2;
1443 swizzle_pipe[3] = 3;
1444 swizzle_pipe[4] = 4;
1445 break;
1446 case 6:
1447 swizzle_pipe[0] = 0;
1448 swizzle_pipe[1] = 2;
1449 swizzle_pipe[2] = 4;
1450 swizzle_pipe[3] = 5;
1451 swizzle_pipe[4] = 1;
1452 swizzle_pipe[5] = 3;
1453 break;
1454 case 7:
1455 swizzle_pipe[0] = 0;
1456 swizzle_pipe[1] = 2;
1457 swizzle_pipe[2] = 4;
1458 swizzle_pipe[3] = 6;
1459 swizzle_pipe[4] = 1;
1460 swizzle_pipe[5] = 3;
1461 swizzle_pipe[6] = 5;
1462 break;
1463 case 8:
1464 swizzle_pipe[0] = 0;
1465 swizzle_pipe[1] = 2;
1466 swizzle_pipe[2] = 4;
1467 swizzle_pipe[3] = 6;
1468 swizzle_pipe[4] = 1;
1469 swizzle_pipe[5] = 3;
1470 swizzle_pipe[6] = 5;
1471 swizzle_pipe[7] = 7;
1472 break;
1473 } 1406 }
1474 1407
1475 cur_backend = 0; 1408 for (i = 0; i < max_rb_num; i++) {
1476 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { 1409 if (!(mask & disabled_rb_mask)) {
1477 while (((1 << cur_backend) & enabled_backends_mask) == 0) 1410 for (j = 0; j < pipe_rb_ratio; j++) {
1478 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; 1411 data <<= rb_num_width;
1479 1412 data |= max_rb_num - i - 1;
1480 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); 1413 }
1481 1414 if (pipe_rb_remain) {
1482 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; 1415 data <<= rb_num_width;
1416 data |= max_rb_num - i - 1;
1417 pipe_rb_remain--;
1418 }
1419 }
1420 mask >>= 1;
1483 } 1421 }
1484 1422
1485 return backend_map; 1423 return data;
1486} 1424}
1487 1425
1488int r600_count_pipe_bits(uint32_t val) 1426int r600_count_pipe_bits(uint32_t val)
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
1500{ 1438{
1501 u32 tiling_config; 1439 u32 tiling_config;
1502 u32 ramcfg; 1440 u32 ramcfg;
1503 u32 backend_map;
1504 u32 cc_rb_backend_disable; 1441 u32 cc_rb_backend_disable;
1505 u32 cc_gc_shader_pipe_config; 1442 u32 cc_gc_shader_pipe_config;
1506 u32 tmp; 1443 u32 tmp;
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
1511 u32 sq_thread_resource_mgmt = 0; 1448 u32 sq_thread_resource_mgmt = 0;
1512 u32 sq_stack_resource_mgmt_1 = 0; 1449 u32 sq_stack_resource_mgmt_1 = 0;
1513 u32 sq_stack_resource_mgmt_2 = 0; 1450 u32 sq_stack_resource_mgmt_2 = 0;
1451 u32 disabled_rb_mask;
1514 1452
1515 /* FIXME: implement */ 1453 rdev->config.r600.tiling_group_size = 256;
1516 switch (rdev->family) { 1454 switch (rdev->family) {
1517 case CHIP_R600: 1455 case CHIP_R600:
1518 rdev->config.r600.max_pipes = 4; 1456 rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1616 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1554 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1617 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1555 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1618 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 1556 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1619 if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) 1557
1620 rdev->config.r600.tiling_group_size = 512;
1621 else
1622 rdev->config.r600.tiling_group_size = 256;
1623 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 1558 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1624 if (tmp > 3) { 1559 if (tmp > 3) {
1625 tiling_config |= ROW_TILING(3); 1560 tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
1631 tiling_config |= BANK_SWAPS(1); 1566 tiling_config |= BANK_SWAPS(1);
1632 1567
1633 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; 1568 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1634 cc_rb_backend_disable |= 1569 tmp = R6XX_MAX_BACKENDS -
1635 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); 1570 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1636 1571 if (tmp < rdev->config.r600.max_backends) {
1637 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; 1572 rdev->config.r600.max_backends = tmp;
1638 cc_gc_shader_pipe_config |= 1573 }
1639 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); 1574
1640 cc_gc_shader_pipe_config |= 1575 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1641 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); 1576 tmp = R6XX_MAX_PIPES -
1642 1577 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1643 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, 1578 if (tmp < rdev->config.r600.max_pipes) {
1644 (R6XX_MAX_BACKENDS - 1579 rdev->config.r600.max_pipes = tmp;
1645 r600_count_pipe_bits((cc_rb_backend_disable & 1580 }
1646 R6XX_MAX_BACKENDS_MASK) >> 16)), 1581 tmp = R6XX_MAX_SIMDS -
1647 (cc_rb_backend_disable >> 16)); 1582 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1583 if (tmp < rdev->config.r600.max_simds) {
1584 rdev->config.r600.max_simds = tmp;
1585 }
1586
1587 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1588 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1589 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1590 R6XX_MAX_BACKENDS, disabled_rb_mask);
1591 tiling_config |= tmp << 16;
1592 rdev->config.r600.backend_map = tmp;
1593
1648 rdev->config.r600.tile_config = tiling_config; 1594 rdev->config.r600.tile_config = tiling_config;
1649 rdev->config.r600.backend_map = backend_map;
1650 tiling_config |= BACKEND_MAP(backend_map);
1651 WREG32(GB_TILING_CONFIG, tiling_config); 1595 WREG32(GB_TILING_CONFIG, tiling_config);
1652 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1596 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1653 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1597 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1654 1598
1655 /* Setup pipes */
1656 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1657 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1658 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1659
1660 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1599 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1661 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1600 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1662 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 1601 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -1900,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1900 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 1839 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1901 NUM_CLIP_SEQ(3))); 1840 NUM_CLIP_SEQ(3)));
1902 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); 1841 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1842 WREG32(VC_ENHANCE, 0);
1903} 1843}
1904 1844
1905 1845
@@ -2487,6 +2427,12 @@ int r600_startup(struct radeon_device *rdev)
2487 if (r) 2427 if (r)
2488 return r; 2428 return r;
2489 2429
2430 r = r600_audio_init(rdev);
2431 if (r) {
2432 DRM_ERROR("radeon: audio init failed\n");
2433 return r;
2434 }
2435
2490 return 0; 2436 return 0;
2491} 2437}
2492 2438
@@ -2523,12 +2469,6 @@ int r600_resume(struct radeon_device *rdev)
2523 return r; 2469 return r;
2524 } 2470 }
2525 2471
2526 r = r600_audio_init(rdev);
2527 if (r) {
2528 DRM_ERROR("radeon: audio resume failed\n");
2529 return r;
2530 }
2531
2532 return r; 2472 return r;
2533} 2473}
2534 2474
@@ -2638,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
2638 rdev->accel_working = false; 2578 rdev->accel_working = false;
2639 } 2579 }
2640 2580
2641 r = r600_audio_init(rdev);
2642 if (r)
2643 return r; /* TODO error handling */
2644 return 0; 2581 return 0;
2645} 2582}
2646 2583
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 7c4fa77f018f..79b55916cf90 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,7 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
57 */ 57 */
58static int r600_audio_chipset_supported(struct radeon_device *rdev) 58static int r600_audio_chipset_supported(struct radeon_device *rdev)
59{ 59{
60 return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev)) 60 return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
61 || rdev->family == CHIP_RS600 61 || rdev->family == CHIP_RS600
62 || rdev->family == CHIP_RS690 62 || rdev->family == CHIP_RS690
63 || rdev->family == CHIP_RS740; 63 || rdev->family == CHIP_RS740;
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
192 struct radeon_device *rdev = dev->dev_private; 192 struct radeon_device *rdev = dev->dev_private;
193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
195 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
195 int base_rate = 48000; 196 int base_rate = 48000;
196 197
197 switch (radeon_encoder->encoder_id) { 198 switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
217 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); 218 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
218 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); 219 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
219 220
220 /* Some magic trigger or src sel? */ 221 /* Select DTO source */
221 WREG32_P(0x5ac, 0x01, ~0x77); 222 WREG32(0x5ac, radeon_crtc->crtc_id);
222 } else { 223 } else {
223 switch (dig->dig_encoder) { 224 switch (dig->dig_encoder) {
224 case 0: 225 case 0:
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0133f5f09bd6..ca87f7afaf23 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2079 return -EINVAL; 2079 return -EINVAL;
2080 } 2080 }
2081 break; 2081 break;
2082 case PACKET3_STRMOUT_BASE_UPDATE:
2083 if (p->family < CHIP_RV770) {
2084 DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
2085 return -EINVAL;
2086 }
2087 if (pkt->count != 1) {
2088 DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
2089 return -EINVAL;
2090 }
2091 if (idx_value > 3) {
2092 DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
2093 return -EINVAL;
2094 }
2095 {
2096 u64 offset;
2097
2098 r = r600_cs_packet_next_reloc(p, &reloc);
2099 if (r) {
2100 DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
2101 return -EINVAL;
2102 }
2103
2104 if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
2105 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
2106 return -EINVAL;
2107 }
2108
2109 offset = radeon_get_ib_value(p, idx+1) << 8;
2110 if (offset != track->vgt_strmout_bo_offset[idx_value]) {
2111 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
2112 offset, track->vgt_strmout_bo_offset[idx_value]);
2113 return -EINVAL;
2114 }
2115
2116 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2117 DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
2118 offset + 4, radeon_bo_size(reloc->robj));
2119 return -EINVAL;
2120 }
2121 ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2122 }
2123 break;
2082 case PACKET3_SURFACE_BASE_UPDATE: 2124 case PACKET3_SURFACE_BASE_UPDATE:
2083 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { 2125 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
2084 DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 2126 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 226379e00ac1..82a0a4c919c0 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -322,9 +322,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
322 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 322 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
323 uint32_t offset; 323 uint32_t offset;
324 324
325 if (ASIC_IS_DCE5(rdev))
326 return;
327
328 /* Silent, r600_hdmi_enable will raise WARN for us */ 325 /* Silent, r600_hdmi_enable will raise WARN for us */
329 if (!dig->afmt->enabled) 326 if (!dig->afmt->enabled)
330 return; 327 return;
@@ -348,7 +345,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
348 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 345 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
349 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ 346 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
350 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 347 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
351 HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
352 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ 348 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */
353 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 349 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
354 } 350 }
@@ -484,7 +480,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
484 uint32_t offset; 480 uint32_t offset;
485 u32 hdmi; 481 u32 hdmi;
486 482
487 if (ASIC_IS_DCE5(rdev)) 483 if (ASIC_IS_DCE6(rdev))
488 return; 484 return;
489 485
490 /* Silent, r600_hdmi_enable will raise WARN for us */ 486 /* Silent, r600_hdmi_enable will raise WARN for us */
@@ -544,7 +540,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
544 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 540 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
545 uint32_t offset; 541 uint32_t offset;
546 542
547 if (ASIC_IS_DCE5(rdev)) 543 if (ASIC_IS_DCE6(rdev))
548 return; 544 return;
549 545
550 /* Called for ATOM_ENCODER_MODE_HDMI only */ 546 /* Called for ATOM_ENCODER_MODE_HDMI only */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 15bd3b216243..025fd5b6c08c 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -219,6 +219,8 @@
219#define BACKEND_MAP(x) ((x) << 16) 219#define BACKEND_MAP(x) ((x) << 16)
220 220
221#define GB_TILING_CONFIG 0x98F0 221#define GB_TILING_CONFIG 0x98F0
222#define PIPE_TILING__SHIFT 1
223#define PIPE_TILING__MASK 0x0000000e
222 224
223#define GC_USER_SHADER_PIPE_CONFIG 0x8954 225#define GC_USER_SHADER_PIPE_CONFIG 0x8954
224#define INACTIVE_QD_PIPES(x) ((x) << 8) 226#define INACTIVE_QD_PIPES(x) ((x) << 8)
@@ -483,6 +485,7 @@
483#define TC_L2_SIZE(x) ((x)<<5) 485#define TC_L2_SIZE(x) ((x)<<5)
484#define L2_DISABLE_LATE_HIT (1<<9) 486#define L2_DISABLE_LATE_HIT (1<<9)
485 487
488#define VC_ENHANCE 0x9714
486 489
487#define VGT_CACHE_INVALIDATION 0x88C4 490#define VGT_CACHE_INVALIDATION 0x88C4
488#define CACHE_INVALIDATION(x) ((x)<<0) 491#define CACHE_INVALIDATION(x) ((x)<<0)
@@ -1161,6 +1164,7 @@
1161#define PACKET3_SET_CTL_CONST 0x6F 1164#define PACKET3_SET_CTL_CONST 0x6F
1162#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0 1165#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
1163#define PACKET3_SET_CTL_CONST_END 0x0003e200 1166#define PACKET3_SET_CTL_CONST_END 0x0003e200
1167#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
1164#define PACKET3_SURFACE_BASE_UPDATE 0x73 1168#define PACKET3_SURFACE_BASE_UPDATE 0x73
1165 1169
1166 1170
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2e24022b389a..fefcca55c1eb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1374,9 +1374,9 @@ struct cayman_asic {
1374 1374
1375struct si_asic { 1375struct si_asic {
1376 unsigned max_shader_engines; 1376 unsigned max_shader_engines;
1377 unsigned max_pipes_per_simd;
1378 unsigned max_tile_pipes; 1377 unsigned max_tile_pipes;
1379 unsigned max_simds_per_se; 1378 unsigned max_cu_per_sh;
1379 unsigned max_sh_per_se;
1380 unsigned max_backends_per_se; 1380 unsigned max_backends_per_se;
1381 unsigned max_texture_channel_caches; 1381 unsigned max_texture_channel_caches;
1382 unsigned max_gprs; 1382 unsigned max_gprs;
@@ -1387,7 +1387,6 @@ struct si_asic {
1387 unsigned sc_hiz_tile_fifo_size; 1387 unsigned sc_hiz_tile_fifo_size;
1388 unsigned sc_earlyz_tile_fifo_size; 1388 unsigned sc_earlyz_tile_fifo_size;
1389 1389
1390 unsigned num_shader_engines;
1391 unsigned num_tile_pipes; 1390 unsigned num_tile_pipes;
1392 unsigned num_backends_per_se; 1391 unsigned num_backends_per_se;
1393 unsigned backend_disable_mask_per_asic; 1392 unsigned backend_disable_mask_per_asic;
@@ -1848,6 +1847,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
1848extern void r600_hdmi_enable(struct drm_encoder *encoder); 1847extern void r600_hdmi_enable(struct drm_encoder *encoder);
1849extern void r600_hdmi_disable(struct drm_encoder *encoder); 1848extern void r600_hdmi_disable(struct drm_encoder *encoder);
1850extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1849extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1850extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1851 u32 tiling_pipe_num,
1852 u32 max_rb_num,
1853 u32 total_max_rb_num,
1854 u32 enabled_rb_mask);
1851 1855
1852/* 1856/*
1853 * evergreen functions used by radeon_encoder.c 1857 * evergreen functions used by radeon_encoder.c
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0137689ed461..142f89462aa4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
147 sync_to_ring, p->ring); 147 sync_to_ring, p->ring);
148} 148}
149 149
150/* XXX: note that this is called from the legacy UMS CS ioctl as well */
150int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 151int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
151{ 152{
152 struct drm_radeon_cs *cs = data; 153 struct drm_radeon_cs *cs = data;
@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
245 } 246 }
246 } 247 }
247 248
248 if ((p->cs_flags & RADEON_CS_USE_VM) && 249 /* these are KMS only */
249 !p->rdev->vm_manager.enabled) { 250 if (p->rdev) {
250 DRM_ERROR("VM not active on asic!\n"); 251 if ((p->cs_flags & RADEON_CS_USE_VM) &&
251 return -EINVAL; 252 !p->rdev->vm_manager.enabled) {
252 } 253 DRM_ERROR("VM not active on asic!\n");
253 254 return -EINVAL;
254 /* we only support VM on SI+ */ 255 }
255 if ((p->rdev->family >= CHIP_TAHITI) &&
256 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
257 DRM_ERROR("VM required on SI+!\n");
258 return -EINVAL;
259 }
260 256
261 if (radeon_cs_get_ring(p, ring, priority)) 257 /* we only support VM on SI+ */
262 return -EINVAL; 258 if ((p->rdev->family >= CHIP_TAHITI) &&
259 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
260 DRM_ERROR("VM required on SI+!\n");
261 return -EINVAL;
262 }
263 263
264 if (radeon_cs_get_ring(p, ring, priority))
265 return -EINVAL;
266 }
264 267
265 /* deal with non-vm */ 268 /* deal with non-vm */
266 if ((p->chunk_ib_idx != -1) && 269 if ((p->chunk_ib_idx != -1) &&
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f0bb2b543b13..2c4d53fd20c5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -57,9 +57,11 @@
57 * 2.13.0 - virtual memory support, streamout 57 * 2.13.0 - virtual memory support, streamout
58 * 2.14.0 - add evergreen tiling informations 58 * 2.14.0 - add evergreen tiling informations
59 * 2.15.0 - add max_pipes query 59 * 2.15.0 - add max_pipes query
60 * 2.16.0 - fix evergreen 2D tiled surface calculation
61 * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
60 */ 62 */
61#define KMS_DRIVER_MAJOR 2 63#define KMS_DRIVER_MAJOR 2
62#define KMS_DRIVER_MINOR 15 64#define KMS_DRIVER_MINOR 17
63#define KMS_DRIVER_PATCHLEVEL 0 65#define KMS_DRIVER_PATCHLEVEL 0
64int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 66int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
65int radeon_driver_unload_kms(struct drm_device *dev); 67int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 79db56e6c2ac..59d44937dd9f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -476,12 +476,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
476 476
477 mutex_lock(&vm->mutex); 477 mutex_lock(&vm->mutex);
478 if (last_pfn > vm->last_pfn) { 478 if (last_pfn > vm->last_pfn) {
479 /* grow va space 32M by 32M */ 479 /* release mutex and lock in right order */
480 unsigned align = ((32 << 20) >> 12) - 1; 480 mutex_unlock(&vm->mutex);
481 radeon_mutex_lock(&rdev->cs_mutex); 481 radeon_mutex_lock(&rdev->cs_mutex);
482 radeon_vm_unbind_locked(rdev, vm); 482 mutex_lock(&vm->mutex);
483 /* and check again */
484 if (last_pfn > vm->last_pfn) {
485 /* grow va space 32M by 32M */
486 unsigned align = ((32 << 20) >> 12) - 1;
487 radeon_vm_unbind_locked(rdev, vm);
488 vm->last_pfn = (last_pfn + align) & ~align;
489 }
483 radeon_mutex_unlock(&rdev->cs_mutex); 490 radeon_mutex_unlock(&rdev->cs_mutex);
484 vm->last_pfn = (last_pfn + align) & ~align;
485 } 491 }
486 head = &vm->va; 492 head = &vm->va;
487 last_offset = 0; 493 last_offset = 0;
@@ -595,8 +601,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
595 if (bo_va == NULL) 601 if (bo_va == NULL)
596 return 0; 602 return 0;
597 603
598 mutex_lock(&vm->mutex);
599 radeon_mutex_lock(&rdev->cs_mutex); 604 radeon_mutex_lock(&rdev->cs_mutex);
605 mutex_lock(&vm->mutex);
600 radeon_vm_bo_update_pte(rdev, vm, bo, NULL); 606 radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
601 radeon_mutex_unlock(&rdev->cs_mutex); 607 radeon_mutex_unlock(&rdev->cs_mutex);
602 list_del(&bo_va->vm_list); 608 list_del(&bo_va->vm_list);
@@ -641,9 +647,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
641 struct radeon_bo_va *bo_va, *tmp; 647 struct radeon_bo_va *bo_va, *tmp;
642 int r; 648 int r;
643 649
644 mutex_lock(&vm->mutex);
645
646 radeon_mutex_lock(&rdev->cs_mutex); 650 radeon_mutex_lock(&rdev->cs_mutex);
651 mutex_lock(&vm->mutex);
647 radeon_vm_unbind_locked(rdev, vm); 652 radeon_vm_unbind_locked(rdev, vm);
648 radeon_mutex_unlock(&rdev->cs_mutex); 653 radeon_mutex_unlock(&rdev->cs_mutex);
649 654
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f1016a5820d1..5c58d7d90cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
273 break; 273 break;
274 case RADEON_INFO_MAX_PIPES: 274 case RADEON_INFO_MAX_PIPES:
275 if (rdev->family >= CHIP_TAHITI) 275 if (rdev->family >= CHIP_TAHITI)
276 value = rdev->config.si.max_pipes_per_simd; 276 value = rdev->config.si.max_cu_per_sh;
277 else if (rdev->family >= CHIP_CAYMAN) 277 else if (rdev->family >= CHIP_CAYMAN)
278 value = rdev->config.cayman.max_pipes_per_simd; 278 value = rdev->config.cayman.max_pipes_per_simd;
279 else if (rdev->family >= CHIP_CEDAR) 279 else if (rdev->family >= CHIP_CEDAR)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 08825548ee69..5b37e283ec38 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -801,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
801 int i; 801 int i;
802 802
803 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 803 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
804 not_processed += radeon_fence_count_emitted(rdev, i); 804 struct radeon_ring *ring = &rdev->ring[i];
805 if (not_processed >= 3) 805
806 break; 806 if (ring->ready) {
807 not_processed += radeon_fence_count_emitted(rdev, i);
808 if (not_processed >= 3)
809 break;
810 }
807 } 811 }
808 812
809 if (not_processed >= 3) { /* should upclock */ 813 if (not_processed >= 3) { /* should upclock */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 8ddab4c76710..6bef46ace831 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -169,11 +169,17 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
169 struct radeon_bo *bo = gem_to_radeon_bo(obj); 169 struct radeon_bo *bo = gem_to_radeon_bo(obj);
170 int ret = 0; 170 int ret = 0;
171 171
172 ret = radeon_bo_reserve(bo, false);
173 if (unlikely(ret != 0))
174 return ERR_PTR(ret);
175
172 /* pin buffer into GTT */ 176 /* pin buffer into GTT */
173 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); 177 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
174 if (ret) 178 if (ret) {
179 radeon_bo_unreserve(bo);
175 return ERR_PTR(ret); 180 return ERR_PTR(ret);
176 181 }
182 radeon_bo_unreserve(bo);
177 return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags); 183 return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
178} 184}
179 185
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 25f9eef12c42..e95c5e61d4e2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
908 return r; 908 return r;
909 } 909 }
910 910
911 r = r600_audio_init(rdev);
912 if (r) {
913 dev_err(rdev->dev, "failed initializing audio\n");
914 return r;
915 }
916
917 r = radeon_ib_pool_start(rdev); 911 r = radeon_ib_pool_start(rdev);
918 if (r) 912 if (r)
919 return r; 913 return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
922 if (r) 916 if (r)
923 return r; 917 return r;
924 918
919 r = r600_audio_init(rdev);
920 if (r) {
921 dev_err(rdev->dev, "failed initializing audio\n");
922 return r;
923 }
924
925 return 0; 925 return 0;
926} 926}
927 927
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3277ddecfe9f..159b6a43fda0 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
637 return r; 637 return r;
638 } 638 }
639 639
640 r = r600_audio_init(rdev);
641 if (r) {
642 dev_err(rdev->dev, "failed initializing audio\n");
643 return r;
644 }
645
646 r = radeon_ib_pool_start(rdev); 640 r = radeon_ib_pool_start(rdev);
647 if (r) 641 if (r)
648 return r; 642 return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
651 if (r) 645 if (r)
652 return r; 646 return r;
653 647
648 r = r600_audio_init(rdev);
649 if (r) {
650 dev_err(rdev->dev, "failed initializing audio\n");
651 return r;
652 }
653
654 return 0; 654 return 0;
655} 655}
656 656
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c2f473bc13b8..b4f51c569c36 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
151 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 151 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
152 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 152 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
153 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 153 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
154 if (rdev->family == CHIP_RV740)
155 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
154 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 156 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
155 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 157 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
156 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 158 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
363/* 365/*
364 * Core functions 366 * Core functions
365 */ 367 */
366static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
367 u32 num_tile_pipes,
368 u32 num_backends,
369 u32 backend_disable_mask)
370{
371 u32 backend_map = 0;
372 u32 enabled_backends_mask;
373 u32 enabled_backends_count;
374 u32 cur_pipe;
375 u32 swizzle_pipe[R7XX_MAX_PIPES];
376 u32 cur_backend;
377 u32 i;
378 bool force_no_swizzle;
379
380 if (num_tile_pipes > R7XX_MAX_PIPES)
381 num_tile_pipes = R7XX_MAX_PIPES;
382 if (num_tile_pipes < 1)
383 num_tile_pipes = 1;
384 if (num_backends > R7XX_MAX_BACKENDS)
385 num_backends = R7XX_MAX_BACKENDS;
386 if (num_backends < 1)
387 num_backends = 1;
388
389 enabled_backends_mask = 0;
390 enabled_backends_count = 0;
391 for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
392 if (((backend_disable_mask >> i) & 1) == 0) {
393 enabled_backends_mask |= (1 << i);
394 ++enabled_backends_count;
395 }
396 if (enabled_backends_count == num_backends)
397 break;
398 }
399
400 if (enabled_backends_count == 0) {
401 enabled_backends_mask = 1;
402 enabled_backends_count = 1;
403 }
404
405 if (enabled_backends_count != num_backends)
406 num_backends = enabled_backends_count;
407
408 switch (rdev->family) {
409 case CHIP_RV770:
410 case CHIP_RV730:
411 force_no_swizzle = false;
412 break;
413 case CHIP_RV710:
414 case CHIP_RV740:
415 default:
416 force_no_swizzle = true;
417 break;
418 }
419
420 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
421 switch (num_tile_pipes) {
422 case 1:
423 swizzle_pipe[0] = 0;
424 break;
425 case 2:
426 swizzle_pipe[0] = 0;
427 swizzle_pipe[1] = 1;
428 break;
429 case 3:
430 if (force_no_swizzle) {
431 swizzle_pipe[0] = 0;
432 swizzle_pipe[1] = 1;
433 swizzle_pipe[2] = 2;
434 } else {
435 swizzle_pipe[0] = 0;
436 swizzle_pipe[1] = 2;
437 swizzle_pipe[2] = 1;
438 }
439 break;
440 case 4:
441 if (force_no_swizzle) {
442 swizzle_pipe[0] = 0;
443 swizzle_pipe[1] = 1;
444 swizzle_pipe[2] = 2;
445 swizzle_pipe[3] = 3;
446 } else {
447 swizzle_pipe[0] = 0;
448 swizzle_pipe[1] = 2;
449 swizzle_pipe[2] = 3;
450 swizzle_pipe[3] = 1;
451 }
452 break;
453 case 5:
454 if (force_no_swizzle) {
455 swizzle_pipe[0] = 0;
456 swizzle_pipe[1] = 1;
457 swizzle_pipe[2] = 2;
458 swizzle_pipe[3] = 3;
459 swizzle_pipe[4] = 4;
460 } else {
461 swizzle_pipe[0] = 0;
462 swizzle_pipe[1] = 2;
463 swizzle_pipe[2] = 4;
464 swizzle_pipe[3] = 1;
465 swizzle_pipe[4] = 3;
466 }
467 break;
468 case 6:
469 if (force_no_swizzle) {
470 swizzle_pipe[0] = 0;
471 swizzle_pipe[1] = 1;
472 swizzle_pipe[2] = 2;
473 swizzle_pipe[3] = 3;
474 swizzle_pipe[4] = 4;
475 swizzle_pipe[5] = 5;
476 } else {
477 swizzle_pipe[0] = 0;
478 swizzle_pipe[1] = 2;
479 swizzle_pipe[2] = 4;
480 swizzle_pipe[3] = 5;
481 swizzle_pipe[4] = 3;
482 swizzle_pipe[5] = 1;
483 }
484 break;
485 case 7:
486 if (force_no_swizzle) {
487 swizzle_pipe[0] = 0;
488 swizzle_pipe[1] = 1;
489 swizzle_pipe[2] = 2;
490 swizzle_pipe[3] = 3;
491 swizzle_pipe[4] = 4;
492 swizzle_pipe[5] = 5;
493 swizzle_pipe[6] = 6;
494 } else {
495 swizzle_pipe[0] = 0;
496 swizzle_pipe[1] = 2;
497 swizzle_pipe[2] = 4;
498 swizzle_pipe[3] = 6;
499 swizzle_pipe[4] = 3;
500 swizzle_pipe[5] = 1;
501 swizzle_pipe[6] = 5;
502 }
503 break;
504 case 8:
505 if (force_no_swizzle) {
506 swizzle_pipe[0] = 0;
507 swizzle_pipe[1] = 1;
508 swizzle_pipe[2] = 2;
509 swizzle_pipe[3] = 3;
510 swizzle_pipe[4] = 4;
511 swizzle_pipe[5] = 5;
512 swizzle_pipe[6] = 6;
513 swizzle_pipe[7] = 7;
514 } else {
515 swizzle_pipe[0] = 0;
516 swizzle_pipe[1] = 2;
517 swizzle_pipe[2] = 4;
518 swizzle_pipe[3] = 6;
519 swizzle_pipe[4] = 3;
520 swizzle_pipe[5] = 1;
521 swizzle_pipe[6] = 7;
522 swizzle_pipe[7] = 5;
523 }
524 break;
525 }
526
527 cur_backend = 0;
528 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
529 while (((1 << cur_backend) & enabled_backends_mask) == 0)
530 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
531
532 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
533
534 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
535 }
536
537 return backend_map;
538}
539
540static void rv770_gpu_init(struct radeon_device *rdev) 368static void rv770_gpu_init(struct radeon_device *rdev)
541{ 369{
542 int i, j, num_qd_pipes; 370 int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
552 u32 sq_thread_resource_mgmt; 380 u32 sq_thread_resource_mgmt;
553 u32 hdp_host_path_cntl; 381 u32 hdp_host_path_cntl;
554 u32 sq_dyn_gpr_size_simd_ab_0; 382 u32 sq_dyn_gpr_size_simd_ab_0;
555 u32 backend_map;
556 u32 gb_tiling_config = 0; 383 u32 gb_tiling_config = 0;
557 u32 cc_rb_backend_disable = 0; 384 u32 cc_rb_backend_disable = 0;
558 u32 cc_gc_shader_pipe_config = 0; 385 u32 cc_gc_shader_pipe_config = 0;
559 u32 mc_arb_ramcfg; 386 u32 mc_arb_ramcfg;
560 u32 db_debug4; 387 u32 db_debug4, tmp;
388 u32 inactive_pipes, shader_pipe_config;
389 u32 disabled_rb_mask;
390 unsigned active_number;
561 391
562 /* setup chip specs */ 392 /* setup chip specs */
393 rdev->config.rv770.tiling_group_size = 256;
563 switch (rdev->family) { 394 switch (rdev->family) {
564 case CHIP_RV770: 395 case CHIP_RV770:
565 rdev->config.rv770.max_pipes = 4; 396 rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev)
670 /* setup tiling, simd, pipe config */ 501 /* setup tiling, simd, pipe config */
671 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 502 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
672 503
504 shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
505 inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
506 for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
507 if (!(inactive_pipes & tmp)) {
508 active_number++;
509 }
510 tmp <<= 1;
511 }
512 if (active_number == 1) {
513 WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
514 } else {
515 WREG32(SPI_CONFIG_CNTL, 0);
516 }
517
518 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
519 tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
520 if (tmp < rdev->config.rv770.max_backends) {
521 rdev->config.rv770.max_backends = tmp;
522 }
523
524 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
525 tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
526 if (tmp < rdev->config.rv770.max_pipes) {
527 rdev->config.rv770.max_pipes = tmp;
528 }
529 tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
530 if (tmp < rdev->config.rv770.max_simds) {
531 rdev->config.rv770.max_simds = tmp;
532 }
533
673 switch (rdev->config.rv770.max_tile_pipes) { 534 switch (rdev->config.rv770.max_tile_pipes) {
674 case 1: 535 case 1:
675 default: 536 default:
676 gb_tiling_config |= PIPE_TILING(0); 537 gb_tiling_config = PIPE_TILING(0);
677 break; 538 break;
678 case 2: 539 case 2:
679 gb_tiling_config |= PIPE_TILING(1); 540 gb_tiling_config = PIPE_TILING(1);
680 break; 541 break;
681 case 4: 542 case 4:
682 gb_tiling_config |= PIPE_TILING(2); 543 gb_tiling_config = PIPE_TILING(2);
683 break; 544 break;
684 case 8: 545 case 8:
685 gb_tiling_config |= PIPE_TILING(3); 546 gb_tiling_config = PIPE_TILING(3);
686 break; 547 break;
687 } 548 }
688 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; 549 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
689 550
551 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
552 tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
553 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
554 R7XX_MAX_BACKENDS, disabled_rb_mask);
555 gb_tiling_config |= tmp << 16;
556 rdev->config.rv770.backend_map = tmp;
557
690 if (rdev->family == CHIP_RV770) 558 if (rdev->family == CHIP_RV770)
691 gb_tiling_config |= BANK_TILING(1); 559 gb_tiling_config |= BANK_TILING(1);
692 else 560 else {
693 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 561 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
562 gb_tiling_config |= BANK_TILING(1);
563 else
564 gb_tiling_config |= BANK_TILING(0);
565 }
694 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); 566 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
695 gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 567 gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
696 if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
697 rdev->config.rv770.tiling_group_size = 512;
698 else
699 rdev->config.rv770.tiling_group_size = 256;
700 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { 568 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
701 gb_tiling_config |= ROW_TILING(3); 569 gb_tiling_config |= ROW_TILING(3);
702 gb_tiling_config |= SAMPLE_SPLIT(3); 570 gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
708 } 576 }
709 577
710 gb_tiling_config |= BANK_SWAPS(1); 578 gb_tiling_config |= BANK_SWAPS(1);
711
712 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
713 cc_rb_backend_disable |=
714 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
715
716 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
717 cc_gc_shader_pipe_config |=
718 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
719 cc_gc_shader_pipe_config |=
720 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
721
722 if (rdev->family == CHIP_RV740)
723 backend_map = 0x28;
724 else
725 backend_map = r700_get_tile_pipe_to_backend_map(rdev,
726 rdev->config.rv770.max_tile_pipes,
727 (R7XX_MAX_BACKENDS -
728 r600_count_pipe_bits((cc_rb_backend_disable &
729 R7XX_MAX_BACKENDS_MASK) >> 16)),
730 (cc_rb_backend_disable >> 16));
731
732 rdev->config.rv770.tile_config = gb_tiling_config; 579 rdev->config.rv770.tile_config = gb_tiling_config;
733 rdev->config.rv770.backend_map = backend_map;
734 gb_tiling_config |= BACKEND_MAP(backend_map);
735 580
736 WREG32(GB_TILING_CONFIG, gb_tiling_config); 581 WREG32(GB_TILING_CONFIG, gb_tiling_config);
737 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 582 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
738 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 583 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
739 584
740 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
741 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
742 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
743 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
744
745 WREG32(CGTS_SYS_TCC_DISABLE, 0); 585 WREG32(CGTS_SYS_TCC_DISABLE, 0);
746 WREG32(CGTS_TCC_DISABLE, 0); 586 WREG32(CGTS_TCC_DISABLE, 0);
747 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); 587 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
748 WREG32(CGTS_USER_TCC_DISABLE, 0); 588 WREG32(CGTS_USER_TCC_DISABLE, 0);
749 589
750 num_qd_pipes = 590
751 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 591 num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
752 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); 592 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
753 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); 593 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
754 594
@@ -776,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
776 ACK_FLUSH_CTL(3) | 616 ACK_FLUSH_CTL(3) |
777 SYNC_FLUSH_CTL)); 617 SYNC_FLUSH_CTL));
778 618
619 if (rdev->family != CHIP_RV770)
620 WREG32(SMX_SAR_CTL0, 0x00003f3f);
621
779 db_debug3 = RREG32(DB_DEBUG3); 622 db_debug3 = RREG32(DB_DEBUG3);
780 db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f); 623 db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
781 switch (rdev->family) { 624 switch (rdev->family) {
@@ -809,8 +652,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
809 652
810 WREG32(VGT_NUM_INSTANCES, 1); 653 WREG32(VGT_NUM_INSTANCES, 1);
811 654
812 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
813
814 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); 655 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
815 656
816 WREG32(CP_PERFMON_CNTL, 0); 657 WREG32(CP_PERFMON_CNTL, 0);
@@ -954,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
954 795
955 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 796 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
956 NUM_CLIP_SEQ(3))); 797 NUM_CLIP_SEQ(3)));
957 798 WREG32(VC_ENHANCE, 0);
958} 799}
959 800
960void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 801void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
@@ -1118,6 +959,12 @@ static int rv770_startup(struct radeon_device *rdev)
1118 if (r) 959 if (r)
1119 return r; 960 return r;
1120 961
962 r = r600_audio_init(rdev);
963 if (r) {
964 DRM_ERROR("radeon: audio init failed\n");
965 return r;
966 }
967
1121 return 0; 968 return 0;
1122} 969}
1123 970
@@ -1140,12 +987,6 @@ int rv770_resume(struct radeon_device *rdev)
1140 return r; 987 return r;
1141 } 988 }
1142 989
1143 r = r600_audio_init(rdev);
1144 if (r) {
1145 dev_err(rdev->dev, "radeon: audio init failed\n");
1146 return r;
1147 }
1148
1149 return r; 990 return r;
1150 991
1151} 992}
@@ -1254,12 +1095,6 @@ int rv770_init(struct radeon_device *rdev)
1254 rdev->accel_working = false; 1095 rdev->accel_working = false;
1255 } 1096 }
1256 1097
1257 r = r600_audio_init(rdev);
1258 if (r) {
1259 dev_err(rdev->dev, "radeon: audio init failed\n");
1260 return r;
1261 }
1262
1263 return 0; 1098 return 0;
1264} 1099}
1265 1100
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9c549f702f2f..b0adfc595d75 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -106,10 +106,13 @@
106#define BACKEND_MAP(x) ((x) << 16) 106#define BACKEND_MAP(x) ((x) << 16)
107 107
108#define GB_TILING_CONFIG 0x98F0 108#define GB_TILING_CONFIG 0x98F0
109#define PIPE_TILING__SHIFT 1
110#define PIPE_TILING__MASK 0x0000000e
109 111
110#define GC_USER_SHADER_PIPE_CONFIG 0x8954 112#define GC_USER_SHADER_PIPE_CONFIG 0x8954
111#define INACTIVE_QD_PIPES(x) ((x) << 8) 113#define INACTIVE_QD_PIPES(x) ((x) << 8)
112#define INACTIVE_QD_PIPES_MASK 0x0000FF00 114#define INACTIVE_QD_PIPES_MASK 0x0000FF00
115#define INACTIVE_QD_PIPES_SHIFT 8
113#define INACTIVE_SIMDS(x) ((x) << 16) 116#define INACTIVE_SIMDS(x) ((x) << 16)
114#define INACTIVE_SIMDS_MASK 0x00FF0000 117#define INACTIVE_SIMDS_MASK 0x00FF0000
115 118
@@ -174,6 +177,7 @@
174#define MC_VM_MD_L1_TLB0_CNTL 0x2654 177#define MC_VM_MD_L1_TLB0_CNTL 0x2654
175#define MC_VM_MD_L1_TLB1_CNTL 0x2658 178#define MC_VM_MD_L1_TLB1_CNTL 0x2658
176#define MC_VM_MD_L1_TLB2_CNTL 0x265C 179#define MC_VM_MD_L1_TLB2_CNTL 0x265C
180#define MC_VM_MD_L1_TLB3_CNTL 0x2698
177#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C 181#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
178#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 182#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
179#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 183#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
@@ -207,6 +211,7 @@
207#define SCRATCH_UMSK 0x8540 211#define SCRATCH_UMSK 0x8540
208#define SCRATCH_ADDR 0x8544 212#define SCRATCH_ADDR 0x8544
209 213
214#define SMX_SAR_CTL0 0xA008
210#define SMX_DC_CTL0 0xA020 215#define SMX_DC_CTL0 0xA020
211#define USE_HASH_FUNCTION (1 << 0) 216#define USE_HASH_FUNCTION (1 << 0)
212#define CACHE_DEPTH(x) ((x) << 1) 217#define CACHE_DEPTH(x) ((x) << 1)
@@ -306,6 +311,8 @@
306#define TCP_CNTL 0x9610 311#define TCP_CNTL 0x9610
307#define TCP_CHAN_STEER 0x9614 312#define TCP_CHAN_STEER 0x9614
308 313
314#define VC_ENHANCE 0x9714
315
309#define VGT_CACHE_INVALIDATION 0x88C4 316#define VGT_CACHE_INVALIDATION 0x88C4
310#define CACHE_INVALIDATION(x) ((x)<<0) 317#define CACHE_INVALIDATION(x) ((x)<<0)
311#define VC_ONLY 0 318#define VC_ONLY 0
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 549732e56ca9..c7b61f16ecfd 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
867/* 867/*
868 * Core functions 868 * Core functions
869 */ 869 */
870static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
871 u32 num_tile_pipes,
872 u32 num_backends_per_asic,
873 u32 *backend_disable_mask_per_asic,
874 u32 num_shader_engines)
875{
876 u32 backend_map = 0;
877 u32 enabled_backends_mask = 0;
878 u32 enabled_backends_count = 0;
879 u32 num_backends_per_se;
880 u32 cur_pipe;
881 u32 swizzle_pipe[SI_MAX_PIPES];
882 u32 cur_backend = 0;
883 u32 i;
884 bool force_no_swizzle;
885
886 /* force legal values */
887 if (num_tile_pipes < 1)
888 num_tile_pipes = 1;
889 if (num_tile_pipes > rdev->config.si.max_tile_pipes)
890 num_tile_pipes = rdev->config.si.max_tile_pipes;
891 if (num_shader_engines < 1)
892 num_shader_engines = 1;
893 if (num_shader_engines > rdev->config.si.max_shader_engines)
894 num_shader_engines = rdev->config.si.max_shader_engines;
895 if (num_backends_per_asic < num_shader_engines)
896 num_backends_per_asic = num_shader_engines;
897 if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
898 num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
899
900 /* make sure we have the same number of backends per se */
901 num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
902 /* set up the number of backends per se */
903 num_backends_per_se = num_backends_per_asic / num_shader_engines;
904 if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
905 num_backends_per_se = rdev->config.si.max_backends_per_se;
906 num_backends_per_asic = num_backends_per_se * num_shader_engines;
907 }
908
909 /* create enable mask and count for enabled backends */
910 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
911 if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
912 enabled_backends_mask |= (1 << i);
913 ++enabled_backends_count;
914 }
915 if (enabled_backends_count == num_backends_per_asic)
916 break;
917 }
918
919 /* force the backends mask to match the current number of backends */
920 if (enabled_backends_count != num_backends_per_asic) {
921 u32 this_backend_enabled;
922 u32 shader_engine;
923 u32 backend_per_se;
924
925 enabled_backends_mask = 0;
926 enabled_backends_count = 0;
927 *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
928 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
929 /* calc the current se */
930 shader_engine = i / rdev->config.si.max_backends_per_se;
931 /* calc the backend per se */
932 backend_per_se = i % rdev->config.si.max_backends_per_se;
933 /* default to not enabled */
934 this_backend_enabled = 0;
935 if ((shader_engine < num_shader_engines) &&
936 (backend_per_se < num_backends_per_se))
937 this_backend_enabled = 1;
938 if (this_backend_enabled) {
939 enabled_backends_mask |= (1 << i);
940 *backend_disable_mask_per_asic &= ~(1 << i);
941 ++enabled_backends_count;
942 }
943 }
944 }
945
946
947 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
948 switch (rdev->family) {
949 case CHIP_TAHITI:
950 case CHIP_PITCAIRN:
951 case CHIP_VERDE:
952 force_no_swizzle = true;
953 break;
954 default:
955 force_no_swizzle = false;
956 break;
957 }
958 if (force_no_swizzle) {
959 bool last_backend_enabled = false;
960
961 force_no_swizzle = false;
962 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
963 if (((enabled_backends_mask >> i) & 1) == 1) {
964 if (last_backend_enabled)
965 force_no_swizzle = true;
966 last_backend_enabled = true;
967 } else
968 last_backend_enabled = false;
969 }
970 }
971
972 switch (num_tile_pipes) {
973 case 1:
974 case 3:
975 case 5:
976 case 7:
977 DRM_ERROR("odd number of pipes!\n");
978 break;
979 case 2:
980 swizzle_pipe[0] = 0;
981 swizzle_pipe[1] = 1;
982 break;
983 case 4:
984 if (force_no_swizzle) {
985 swizzle_pipe[0] = 0;
986 swizzle_pipe[1] = 1;
987 swizzle_pipe[2] = 2;
988 swizzle_pipe[3] = 3;
989 } else {
990 swizzle_pipe[0] = 0;
991 swizzle_pipe[1] = 2;
992 swizzle_pipe[2] = 1;
993 swizzle_pipe[3] = 3;
994 }
995 break;
996 case 6:
997 if (force_no_swizzle) {
998 swizzle_pipe[0] = 0;
999 swizzle_pipe[1] = 1;
1000 swizzle_pipe[2] = 2;
1001 swizzle_pipe[3] = 3;
1002 swizzle_pipe[4] = 4;
1003 swizzle_pipe[5] = 5;
1004 } else {
1005 swizzle_pipe[0] = 0;
1006 swizzle_pipe[1] = 2;
1007 swizzle_pipe[2] = 4;
1008 swizzle_pipe[3] = 1;
1009 swizzle_pipe[4] = 3;
1010 swizzle_pipe[5] = 5;
1011 }
1012 break;
1013 case 8:
1014 if (force_no_swizzle) {
1015 swizzle_pipe[0] = 0;
1016 swizzle_pipe[1] = 1;
1017 swizzle_pipe[2] = 2;
1018 swizzle_pipe[3] = 3;
1019 swizzle_pipe[4] = 4;
1020 swizzle_pipe[5] = 5;
1021 swizzle_pipe[6] = 6;
1022 swizzle_pipe[7] = 7;
1023 } else {
1024 swizzle_pipe[0] = 0;
1025 swizzle_pipe[1] = 2;
1026 swizzle_pipe[2] = 4;
1027 swizzle_pipe[3] = 6;
1028 swizzle_pipe[4] = 1;
1029 swizzle_pipe[5] = 3;
1030 swizzle_pipe[6] = 5;
1031 swizzle_pipe[7] = 7;
1032 }
1033 break;
1034 }
1035
1036 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1037 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1038 cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
1039
1040 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
1041
1042 cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
1043 }
1044
1045 return backend_map;
1046}
1047
1048static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
1049 u32 disable_mask_per_se,
1050 u32 max_disable_mask_per_se,
1051 u32 num_shader_engines)
1052{
1053 u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
1054 u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
1055
1056 if (num_shader_engines == 1)
1057 return disable_mask_per_asic;
1058 else if (num_shader_engines == 2)
1059 return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
1060 else
1061 return 0xffffffff;
1062}
1063
1064static void si_tiling_mode_table_init(struct radeon_device *rdev) 870static void si_tiling_mode_table_init(struct radeon_device *rdev)
1065{ 871{
1066 const u32 num_tile_mode_states = 32; 872 const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
1562 DRM_ERROR("unknown asic: 0x%x\n", rdev->family); 1368 DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
1563} 1369}
1564 1370
1371static void si_select_se_sh(struct radeon_device *rdev,
1372 u32 se_num, u32 sh_num)
1373{
1374 u32 data = INSTANCE_BROADCAST_WRITES;
1375
1376 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1377 data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1378 else if (se_num == 0xffffffff)
1379 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1380 else if (sh_num == 0xffffffff)
1381 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
1382 else
1383 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
1384 WREG32(GRBM_GFX_INDEX, data);
1385}
1386
1387static u32 si_create_bitmask(u32 bit_width)
1388{
1389 u32 i, mask = 0;
1390
1391 for (i = 0; i < bit_width; i++) {
1392 mask <<= 1;
1393 mask |= 1;
1394 }
1395 return mask;
1396}
1397
1398static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
1399{
1400 u32 data, mask;
1401
1402 data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
1403 if (data & 1)
1404 data &= INACTIVE_CUS_MASK;
1405 else
1406 data = 0;
1407 data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
1408
1409 data >>= INACTIVE_CUS_SHIFT;
1410
1411 mask = si_create_bitmask(cu_per_sh);
1412
1413 return ~data & mask;
1414}
1415
1416static void si_setup_spi(struct radeon_device *rdev,
1417 u32 se_num, u32 sh_per_se,
1418 u32 cu_per_sh)
1419{
1420 int i, j, k;
1421 u32 data, mask, active_cu;
1422
1423 for (i = 0; i < se_num; i++) {
1424 for (j = 0; j < sh_per_se; j++) {
1425 si_select_se_sh(rdev, i, j);
1426 data = RREG32(SPI_STATIC_THREAD_MGMT_3);
1427 active_cu = si_get_cu_enabled(rdev, cu_per_sh);
1428
1429 mask = 1;
1430 for (k = 0; k < 16; k++) {
1431 mask <<= k;
1432 if (active_cu & mask) {
1433 data &= ~mask;
1434 WREG32(SPI_STATIC_THREAD_MGMT_3, data);
1435 break;
1436 }
1437 }
1438 }
1439 }
1440 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1441}
1442
1443static u32 si_get_rb_disabled(struct radeon_device *rdev,
1444 u32 max_rb_num, u32 se_num,
1445 u32 sh_per_se)
1446{
1447 u32 data, mask;
1448
1449 data = RREG32(CC_RB_BACKEND_DISABLE);
1450 if (data & 1)
1451 data &= BACKEND_DISABLE_MASK;
1452 else
1453 data = 0;
1454 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
1455
1456 data >>= BACKEND_DISABLE_SHIFT;
1457
1458 mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
1459
1460 return data & mask;
1461}
1462
1463static void si_setup_rb(struct radeon_device *rdev,
1464 u32 se_num, u32 sh_per_se,
1465 u32 max_rb_num)
1466{
1467 int i, j;
1468 u32 data, mask;
1469 u32 disabled_rbs = 0;
1470 u32 enabled_rbs = 0;
1471
1472 for (i = 0; i < se_num; i++) {
1473 for (j = 0; j < sh_per_se; j++) {
1474 si_select_se_sh(rdev, i, j);
1475 data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
1476 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
1477 }
1478 }
1479 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1480
1481 mask = 1;
1482 for (i = 0; i < max_rb_num; i++) {
1483 if (!(disabled_rbs & mask))
1484 enabled_rbs |= mask;
1485 mask <<= 1;
1486 }
1487
1488 for (i = 0; i < se_num; i++) {
1489 si_select_se_sh(rdev, i, 0xffffffff);
1490 data = 0;
1491 for (j = 0; j < sh_per_se; j++) {
1492 switch (enabled_rbs & 3) {
1493 case 1:
1494 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
1495 break;
1496 case 2:
1497 data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
1498 break;
1499 case 3:
1500 default:
1501 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
1502 break;
1503 }
1504 enabled_rbs >>= 2;
1505 }
1506 WREG32(PA_SC_RASTER_CONFIG, data);
1507 }
1508 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1509}
1510
1565static void si_gpu_init(struct radeon_device *rdev) 1511static void si_gpu_init(struct radeon_device *rdev)
1566{ 1512{
1567 u32 cc_rb_backend_disable = 0;
1568 u32 cc_gc_shader_array_config;
1569 u32 gb_addr_config = 0; 1513 u32 gb_addr_config = 0;
1570 u32 mc_shared_chmap, mc_arb_ramcfg; 1514 u32 mc_shared_chmap, mc_arb_ramcfg;
1571 u32 gb_backend_map;
1572 u32 cgts_tcc_disable;
1573 u32 sx_debug_1; 1515 u32 sx_debug_1;
1574 u32 gc_user_shader_array_config;
1575 u32 gc_user_rb_backend_disable;
1576 u32 cgts_user_tcc_disable;
1577 u32 hdp_host_path_cntl; 1516 u32 hdp_host_path_cntl;
1578 u32 tmp; 1517 u32 tmp;
1579 int i, j; 1518 int i, j;
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
1581 switch (rdev->family) { 1520 switch (rdev->family) {
1582 case CHIP_TAHITI: 1521 case CHIP_TAHITI:
1583 rdev->config.si.max_shader_engines = 2; 1522 rdev->config.si.max_shader_engines = 2;
1584 rdev->config.si.max_pipes_per_simd = 4;
1585 rdev->config.si.max_tile_pipes = 12; 1523 rdev->config.si.max_tile_pipes = 12;
1586 rdev->config.si.max_simds_per_se = 8; 1524 rdev->config.si.max_cu_per_sh = 8;
1525 rdev->config.si.max_sh_per_se = 2;
1587 rdev->config.si.max_backends_per_se = 4; 1526 rdev->config.si.max_backends_per_se = 4;
1588 rdev->config.si.max_texture_channel_caches = 12; 1527 rdev->config.si.max_texture_channel_caches = 12;
1589 rdev->config.si.max_gprs = 256; 1528 rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
1594 rdev->config.si.sc_prim_fifo_size_backend = 0x100; 1533 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
1595 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1534 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1596 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1535 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1536 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1597 break; 1537 break;
1598 case CHIP_PITCAIRN: 1538 case CHIP_PITCAIRN:
1599 rdev->config.si.max_shader_engines = 2; 1539 rdev->config.si.max_shader_engines = 2;
1600 rdev->config.si.max_pipes_per_simd = 4;
1601 rdev->config.si.max_tile_pipes = 8; 1540 rdev->config.si.max_tile_pipes = 8;
1602 rdev->config.si.max_simds_per_se = 5; 1541 rdev->config.si.max_cu_per_sh = 5;
1542 rdev->config.si.max_sh_per_se = 2;
1603 rdev->config.si.max_backends_per_se = 4; 1543 rdev->config.si.max_backends_per_se = 4;
1604 rdev->config.si.max_texture_channel_caches = 8; 1544 rdev->config.si.max_texture_channel_caches = 8;
1605 rdev->config.si.max_gprs = 256; 1545 rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
1610 rdev->config.si.sc_prim_fifo_size_backend = 0x100; 1550 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
1611 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1551 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1612 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1552 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1553 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1613 break; 1554 break;
1614 case CHIP_VERDE: 1555 case CHIP_VERDE:
1615 default: 1556 default:
1616 rdev->config.si.max_shader_engines = 1; 1557 rdev->config.si.max_shader_engines = 1;
1617 rdev->config.si.max_pipes_per_simd = 4;
1618 rdev->config.si.max_tile_pipes = 4; 1558 rdev->config.si.max_tile_pipes = 4;
1619 rdev->config.si.max_simds_per_se = 2; 1559 rdev->config.si.max_cu_per_sh = 2;
1560 rdev->config.si.max_sh_per_se = 2;
1620 rdev->config.si.max_backends_per_se = 4; 1561 rdev->config.si.max_backends_per_se = 4;
1621 rdev->config.si.max_texture_channel_caches = 4; 1562 rdev->config.si.max_texture_channel_caches = 4;
1622 rdev->config.si.max_gprs = 256; 1563 rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
1627 rdev->config.si.sc_prim_fifo_size_backend = 0x40; 1568 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
1628 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1569 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1629 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1570 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1571 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
1630 break; 1572 break;
1631 } 1573 }
1632 1574
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
1648 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1590 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1649 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1591 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1650 1592
1651 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
1652 cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
1653 cgts_tcc_disable = 0xffff0000;
1654 for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
1655 cgts_tcc_disable &= ~(1 << (16 + i));
1656 gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
1657 gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
1658 cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
1659
1660 rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
1661 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; 1593 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
1662 tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
1663 rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
1664 tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
1665 rdev->config.si.backend_disable_mask_per_asic =
1666 si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
1667 rdev->config.si.num_shader_engines);
1668 rdev->config.si.backend_map =
1669 si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
1670 rdev->config.si.num_backends_per_se *
1671 rdev->config.si.num_shader_engines,
1672 &rdev->config.si.backend_disable_mask_per_asic,
1673 rdev->config.si.num_shader_engines);
1674 tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
1675 rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
1676 rdev->config.si.mem_max_burst_length_bytes = 256; 1594 rdev->config.si.mem_max_burst_length_bytes = 256;
1677 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; 1595 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
1678 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 1596 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
1683 rdev->config.si.num_gpus = 1; 1601 rdev->config.si.num_gpus = 1;
1684 rdev->config.si.multi_gpu_tile_size = 64; 1602 rdev->config.si.multi_gpu_tile_size = 64;
1685 1603
1686 gb_addr_config = 0; 1604 /* fix up row size */
1687 switch (rdev->config.si.num_tile_pipes) { 1605 gb_addr_config &= ~ROW_SIZE_MASK;
1688 case 1:
1689 gb_addr_config |= NUM_PIPES(0);
1690 break;
1691 case 2:
1692 gb_addr_config |= NUM_PIPES(1);
1693 break;
1694 case 4:
1695 gb_addr_config |= NUM_PIPES(2);
1696 break;
1697 case 8:
1698 default:
1699 gb_addr_config |= NUM_PIPES(3);
1700 break;
1701 }
1702
1703 tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
1704 gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
1705 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
1706 tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
1707 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
1708 switch (rdev->config.si.num_gpus) {
1709 case 1:
1710 default:
1711 gb_addr_config |= NUM_GPUS(0);
1712 break;
1713 case 2:
1714 gb_addr_config |= NUM_GPUS(1);
1715 break;
1716 case 4:
1717 gb_addr_config |= NUM_GPUS(2);
1718 break;
1719 }
1720 switch (rdev->config.si.multi_gpu_tile_size) {
1721 case 16:
1722 gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
1723 break;
1724 case 32:
1725 default:
1726 gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
1727 break;
1728 case 64:
1729 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
1730 break;
1731 case 128:
1732 gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
1733 break;
1734 }
1735 switch (rdev->config.si.mem_row_size_in_kb) { 1606 switch (rdev->config.si.mem_row_size_in_kb) {
1736 case 1: 1607 case 1:
1737 default: 1608 default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
1745 break; 1616 break;
1746 } 1617 }
1747 1618
1748 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
1749 rdev->config.si.num_tile_pipes = (1 << tmp);
1750 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
1751 rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
1752 tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
1753 rdev->config.si.num_shader_engines = tmp + 1;
1754 tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
1755 rdev->config.si.num_gpus = tmp + 1;
1756 tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
1757 rdev->config.si.multi_gpu_tile_size = 1 << tmp;
1758 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
1759 rdev->config.si.mem_row_size_in_kb = 1 << tmp;
1760
1761 gb_backend_map =
1762 si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
1763 rdev->config.si.num_backends_per_se *
1764 rdev->config.si.num_shader_engines,
1765 &rdev->config.si.backend_disable_mask_per_asic,
1766 rdev->config.si.num_shader_engines);
1767
1768 /* setup tiling info dword. gb_addr_config is not adequate since it does 1619 /* setup tiling info dword. gb_addr_config is not adequate since it does
1769 * not have bank info, so create a custom tiling dword. 1620 * not have bank info, so create a custom tiling dword.
1770 * bits 3:0 num_pipes 1621 * bits 3:0 num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
1789 rdev->config.si.tile_config |= (3 << 0); 1640 rdev->config.si.tile_config |= (3 << 0);
1790 break; 1641 break;
1791 } 1642 }
1792 rdev->config.si.tile_config |= 1643 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
1793 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1644 rdev->config.si.tile_config |= 1 << 4;
1645 else
1646 rdev->config.si.tile_config |= 0 << 4;
1794 rdev->config.si.tile_config |= 1647 rdev->config.si.tile_config |=
1795 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 1648 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
1796 rdev->config.si.tile_config |= 1649 rdev->config.si.tile_config |=
1797 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 1650 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
1798 1651
1799 rdev->config.si.backend_map = gb_backend_map;
1800 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1652 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1801 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1653 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1802 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 1654 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1803 1655
1804 /* primary versions */ 1656 si_tiling_mode_table_init(rdev);
1805 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1806 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1807 WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
1808
1809 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
1810 1657
1811 /* user versions */ 1658 si_setup_rb(rdev, rdev->config.si.max_shader_engines,
1812 WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1659 rdev->config.si.max_sh_per_se,
1813 WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1660 rdev->config.si.max_backends_per_se);
1814 WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
1815 1661
1816 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 1662 si_setup_spi(rdev, rdev->config.si.max_shader_engines,
1663 rdev->config.si.max_sh_per_se,
1664 rdev->config.si.max_cu_per_sh);
1817 1665
1818 si_tiling_mode_table_init(rdev);
1819 1666
1820 /* set HW defaults for 3D engine */ 1667 /* set HW defaults for 3D engine */
1821 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 1668 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
index eda938a7cb6e..501f9d431d57 100644
--- a/drivers/gpu/drm/radeon/si_reg.h
+++ b/drivers/gpu/drm/radeon/si_reg.h
@@ -30,4 +30,76 @@
30#define SI_DC_GPIO_HPD_EN 0x65b8 30#define SI_DC_GPIO_HPD_EN 0x65b8
31#define SI_DC_GPIO_HPD_Y 0x65bc 31#define SI_DC_GPIO_HPD_Y 0x65bc
32 32
33#define SI_GRPH_CONTROL 0x6804
34# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
35# define SI_GRPH_DEPTH_8BPP 0
36# define SI_GRPH_DEPTH_16BPP 1
37# define SI_GRPH_DEPTH_32BPP 2
38# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
39# define SI_ADDR_SURF_2_BANK 0
40# define SI_ADDR_SURF_4_BANK 1
41# define SI_ADDR_SURF_8_BANK 2
42# define SI_ADDR_SURF_16_BANK 3
43# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
44# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
45# define SI_ADDR_SURF_BANK_WIDTH_1 0
46# define SI_ADDR_SURF_BANK_WIDTH_2 1
47# define SI_ADDR_SURF_BANK_WIDTH_4 2
48# define SI_ADDR_SURF_BANK_WIDTH_8 3
49# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
50/* 8 BPP */
51# define SI_GRPH_FORMAT_INDEXED 0
52/* 16 BPP */
53# define SI_GRPH_FORMAT_ARGB1555 0
54# define SI_GRPH_FORMAT_ARGB565 1
55# define SI_GRPH_FORMAT_ARGB4444 2
56# define SI_GRPH_FORMAT_AI88 3
57# define SI_GRPH_FORMAT_MONO16 4
58# define SI_GRPH_FORMAT_BGRA5551 5
59/* 32 BPP */
60# define SI_GRPH_FORMAT_ARGB8888 0
61# define SI_GRPH_FORMAT_ARGB2101010 1
62# define SI_GRPH_FORMAT_32BPP_DIG 2
63# define SI_GRPH_FORMAT_8B_ARGB2101010 3
64# define SI_GRPH_FORMAT_BGRA1010102 4
65# define SI_GRPH_FORMAT_8B_BGRA1010102 5
66# define SI_GRPH_FORMAT_RGB111110 6
67# define SI_GRPH_FORMAT_BGR101111 7
68# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
69# define SI_ADDR_SURF_BANK_HEIGHT_1 0
70# define SI_ADDR_SURF_BANK_HEIGHT_2 1
71# define SI_ADDR_SURF_BANK_HEIGHT_4 2
72# define SI_ADDR_SURF_BANK_HEIGHT_8 3
73# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
74# define SI_ADDR_SURF_TILE_SPLIT_64B 0
75# define SI_ADDR_SURF_TILE_SPLIT_128B 1
76# define SI_ADDR_SURF_TILE_SPLIT_256B 2
77# define SI_ADDR_SURF_TILE_SPLIT_512B 3
78# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
79# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
80# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
81# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
82# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
83# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
84# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
85# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
86# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
87# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
88# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
89# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
90# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
91# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
92# define SI_ADDR_SURF_P2 0
93# define SI_ADDR_SURF_P4_8x16 4
94# define SI_ADDR_SURF_P4_16x16 5
95# define SI_ADDR_SURF_P4_16x32 6
96# define SI_ADDR_SURF_P4_32x32 7
97# define SI_ADDR_SURF_P8_16x16_8x16 8
98# define SI_ADDR_SURF_P8_16x32_8x16 9
99# define SI_ADDR_SURF_P8_32x32_8x16 10
100# define SI_ADDR_SURF_P8_16x32_16x16 11
101# define SI_ADDR_SURF_P8_32x32_16x16 12
102# define SI_ADDR_SURF_P8_32x32_16x32 13
103# define SI_ADDR_SURF_P8_32x64_32x32 14
104
33#endif 105#endif
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 53ea2c42dbd6..db4067962868 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -24,6 +24,11 @@
24#ifndef SI_H 24#ifndef SI_H
25#define SI_H 25#define SI_H
26 26
27#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
28
29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
31
27#define CG_MULT_THERMAL_STATUS 0x714 32#define CG_MULT_THERMAL_STATUS 0x714
28#define ASIC_MAX_TEMP(x) ((x) << 0) 33#define ASIC_MAX_TEMP(x) ((x) << 0)
29#define ASIC_MAX_TEMP_MASK 0x000001ff 34#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -408,6 +413,12 @@
408#define SOFT_RESET_IA (1 << 15) 413#define SOFT_RESET_IA (1 << 15)
409 414
410#define GRBM_GFX_INDEX 0x802C 415#define GRBM_GFX_INDEX 0x802C
416#define INSTANCE_INDEX(x) ((x) << 0)
417#define SH_INDEX(x) ((x) << 8)
418#define SE_INDEX(x) ((x) << 16)
419#define SH_BROADCAST_WRITES (1 << 29)
420#define INSTANCE_BROADCAST_WRITES (1 << 30)
421#define SE_BROADCAST_WRITES (1 << 31)
411 422
412#define GRBM_INT_CNTL 0x8060 423#define GRBM_INT_CNTL 0x8060
413# define RDERR_INT_ENABLE (1 << 0) 424# define RDERR_INT_ENABLE (1 << 0)
@@ -480,6 +491,8 @@
480#define VGT_TF_MEMORY_BASE 0x89B8 491#define VGT_TF_MEMORY_BASE 0x89B8
481 492
482#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc 493#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
494#define INACTIVE_CUS_MASK 0xFFFF0000
495#define INACTIVE_CUS_SHIFT 16
483#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 496#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
484 497
485#define PA_CL_ENHANCE 0x8A14 498#define PA_CL_ENHANCE 0x8A14
@@ -688,6 +701,12 @@
688#define RLC_MC_CNTL 0xC344 701#define RLC_MC_CNTL 0xC344
689#define RLC_UCODE_CNTL 0xC348 702#define RLC_UCODE_CNTL 0xC348
690 703
704#define PA_SC_RASTER_CONFIG 0x28350
705# define RASTER_CONFIG_RB_MAP_0 0
706# define RASTER_CONFIG_RB_MAP_1 1
707# define RASTER_CONFIG_RB_MAP_2 2
708# define RASTER_CONFIG_RB_MAP_3 3
709
691#define VGT_EVENT_INITIATOR 0x28a90 710#define VGT_EVENT_INITIATOR 0x28a90
692# define SAMPLE_STREAMOUTSTATS1 (1 << 0) 711# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
693# define SAMPLE_STREAMOUTSTATS2 (2 << 0) 712# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 30d98d14b5c5..dd14cd1a0033 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
47 if (dev_priv == NULL) 47 if (dev_priv == NULL)
48 return -ENOMEM; 48 return -ENOMEM;
49 49
50 idr_init(&dev_priv->object_idr);
50 dev->dev_private = (void *)dev_priv; 51 dev->dev_private = (void *)dev_priv;
51 dev_priv->chipset = chipset; 52 dev_priv->chipset = chipset;
52 idr_init(&dev->object_name_idr);
53 53
54 return 0; 54 return 0;
55} 55}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 36792bd4da77..36f4b28c1b90 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1204 (*destroy)(bo); 1204 (*destroy)(bo);
1205 else 1205 else
1206 kfree(bo); 1206 kfree(bo);
1207 ttm_mem_global_free(mem_glob, acc_size);
1207 return -EINVAL; 1208 return -EINVAL;
1208 } 1209 }
1209 bo->destroy = destroy; 1210 bo->destroy = destroy;
@@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1307 struct ttm_buffer_object **p_bo) 1308 struct ttm_buffer_object **p_bo)
1308{ 1309{
1309 struct ttm_buffer_object *bo; 1310 struct ttm_buffer_object *bo;
1310 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1311 size_t acc_size; 1311 size_t acc_size;
1312 int ret; 1312 int ret;
1313 1313
1314 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1315 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1316 if (unlikely(ret != 0))
1317 return ret;
1318
1319 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 1314 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1320 1315 if (unlikely(bo == NULL))
1321 if (unlikely(bo == NULL)) {
1322 ttm_mem_global_free(mem_glob, acc_size);
1323 return -ENOMEM; 1316 return -ENOMEM;
1324 }
1325 1317
1318 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1326 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 1319 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1327 buffer_start, interruptible, 1320 buffer_start, interruptible,
1328 persistent_swap_storage, acc_size, NULL, NULL); 1321 persistent_swap_storage, acc_size, NULL, NULL);
@@ -1834,6 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1834 spin_unlock(&glob->lru_lock); 1827 spin_unlock(&glob->lru_lock);
1835 (void) ttm_bo_cleanup_refs(bo, false, false, false); 1828 (void) ttm_bo_cleanup_refs(bo, false, false, false);
1836 kref_put(&bo->list_kref, ttm_bo_release_list); 1829 kref_put(&bo->list_kref, ttm_bo_release_list);
1830 spin_lock(&glob->lru_lock);
1837 continue; 1831 continue;
1838 } 1832 }
1839 1833
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 4d02c46a9420..6e52069894b3 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -13,8 +13,21 @@
13 13
14static struct drm_driver driver; 14static struct drm_driver driver;
15 15
16/*
17 * There are many DisplayLink-based graphics products, all with unique PIDs.
18 * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
19 * We also require a match on SubClass (0x00) and Protocol (0x00),
20 * which is compatible with all known USB 2.0 era graphics chips and firmware,
21 * but allows DisplayLink to increment those for any future incompatible chips
22 */
16static struct usb_device_id id_table[] = { 23static struct usb_device_id id_table[] = {
17 {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,}, 24 {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
25 .bInterfaceSubClass = 0x00,
26 .bInterfaceProtocol = 0x00,
27 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
28 USB_DEVICE_ID_MATCH_INT_CLASS |
29 USB_DEVICE_ID_MATCH_INT_SUBCLASS |
30 USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
18 {}, 31 {},
19}; 32};
20MODULE_DEVICE_TABLE(usb, id_table); 33MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 1f182254e81e..c126182ac07e 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
100 if (dev_priv == NULL) 100 if (dev_priv == NULL)
101 return -ENOMEM; 101 return -ENOMEM;
102 102
103 idr_init(&dev_priv->object_idr);
103 dev->dev_private = (void *)dev_priv; 104 dev->dev_private = (void *)dev_priv;
104 105
105 dev_priv->chipset = chipset; 106 dev_priv->chipset = chipset;
106 107
107 idr_init(&dev->object_name_idr);
108
109 pci_set_master(dev->pdev); 108 pci_set_master(dev->pdev);
110 109
111 ret = drm_vblank_init(dev, 1); 110 ret = drm_vblank_init(dev, 1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 51c9ba5cd2fb..21ee78226560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
66 cmd += sizeof(remap_cmd) / sizeof(uint32); 66 cmd += sizeof(remap_cmd) / sizeof(uint32);
67 67
68 for (i = 0; i < num_pages; ++i) { 68 for (i = 0; i < num_pages; ++i) {
69 if (VMW_PPN_SIZE > 4) 69 if (VMW_PPN_SIZE <= 4)
70 *cmd = page_to_pfn(*pages++); 70 *cmd = page_to_pfn(*pages++);
71 else 71 else
72 *((uint64_t *)cmd) = page_to_pfn(*pages++); 72 *((uint64_t *)cmd) = page_to_pfn(*pages++);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 38f9534ac513..5b3c7d135dc9 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -190,6 +190,19 @@ find_active_client(struct list_head *head)
190 return NULL; 190 return NULL;
191} 191}
192 192
193int vga_switcheroo_get_client_state(struct pci_dev *pdev)
194{
195 struct vga_switcheroo_client *client;
196
197 client = find_client_from_pci(&vgasr_priv.clients, pdev);
198 if (!client)
199 return VGA_SWITCHEROO_NOT_FOUND;
200 if (!vgasr_priv.active)
201 return VGA_SWITCHEROO_INIT;
202 return client->pwr_state;
203}
204EXPORT_SYMBOL(vga_switcheroo_get_client_state);
205
193void vga_switcheroo_unregister_client(struct pci_dev *pdev) 206void vga_switcheroo_unregister_client(struct pci_dev *pdev)
194{ 207{
195 struct vga_switcheroo_client *client; 208 struct vga_switcheroo_client *client;
@@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
291 vga_switchon(new_client); 304 vga_switchon(new_client);
292 305
293 vga_set_default_device(new_client->pdev); 306 vga_set_default_device(new_client->pdev);
294 set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
295
296 return 0; 307 return 0;
297} 308}
298 309
@@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
308 319
309 active->active = false; 320 active->active = false;
310 321
322 set_audio_state(active->id, VGA_SWITCHEROO_OFF);
323
311 if (new_client->fb_info) { 324 if (new_client->fb_info) {
312 struct fb_event event; 325 struct fb_event event;
313 event.info = new_client->fb_info; 326 event.info = new_client->fb_info;
@@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
321 if (new_client->ops->reprobe) 334 if (new_client->ops->reprobe)
322 new_client->ops->reprobe(new_client->pdev); 335 new_client->ops->reprobe(new_client->pdev);
323 336
324 set_audio_state(active->id, VGA_SWITCHEROO_OFF);
325
326 if (active->pwr_state == VGA_SWITCHEROO_ON) 337 if (active->pwr_state == VGA_SWITCHEROO_ON)
327 vga_switchoff(active); 338 vga_switchoff(active);
328 339
340 set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
341
329 new_client->active = true; 342 new_client->active = true;
330 return 0; 343 return 0;
331} 344}
@@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
371 /* pwr off the device not in use */ 384 /* pwr off the device not in use */
372 if (strncmp(usercmd, "OFF", 3) == 0) { 385 if (strncmp(usercmd, "OFF", 3) == 0) {
373 list_for_each_entry(client, &vgasr_priv.clients, list) { 386 list_for_each_entry(client, &vgasr_priv.clients, list) {
374 if (client->active) 387 if (client->active || client_is_audio(client))
375 continue; 388 continue;
389 set_audio_state(client->id, VGA_SWITCHEROO_OFF);
376 if (client->pwr_state == VGA_SWITCHEROO_ON) 390 if (client->pwr_state == VGA_SWITCHEROO_ON)
377 vga_switchoff(client); 391 vga_switchoff(client);
378 } 392 }
@@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
381 /* pwr on the device not in use */ 395 /* pwr on the device not in use */
382 if (strncmp(usercmd, "ON", 2) == 0) { 396 if (strncmp(usercmd, "ON", 2) == 0) {
383 list_for_each_entry(client, &vgasr_priv.clients, list) { 397 list_for_each_entry(client, &vgasr_priv.clients, list) {
384 if (client->active) 398 if (client->active || client_is_audio(client))
385 continue; 399 continue;
386 if (client->pwr_state == VGA_SWITCHEROO_OFF) 400 if (client->pwr_state == VGA_SWITCHEROO_OFF)
387 vga_switchon(client); 401 vga_switchon(client);
402 set_audio_state(client->id, VGA_SWITCHEROO_ON);
388 } 403 }
389 goto out; 404 goto out;
390 } 405 }
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f082e48ab113..70d62f5bc909 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
215 int i; 215 int i;
216 216
217 if (send_command(cmd) || send_argument(key)) { 217 if (send_command(cmd) || send_argument(key)) {
218 pr_warn("%s: read arg fail\n", key); 218 pr_warn("%.4s: read arg fail\n", key);
219 return -EIO; 219 return -EIO;
220 } 220 }
221 221
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
223 223
224 for (i = 0; i < len; i++) { 224 for (i = 0; i < len; i++) {
225 if (__wait_status(0x05)) { 225 if (__wait_status(0x05)) {
226 pr_warn("%s: read data fail\n", key); 226 pr_warn("%.4s: read data fail\n", key);
227 return -EIO; 227 return -EIO;
228 } 228 }
229 buffer[i] = inb(APPLESMC_DATA_PORT); 229 buffer[i] = inb(APPLESMC_DATA_PORT);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index b9d512331ed4..7f1feb2f467a 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev,
191 return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN; 191 return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
192} 192}
193 193
194struct tjmax {
195 char const *id;
196 int tjmax;
197};
198
199static struct tjmax __cpuinitconst tjmax_table[] = {
200 { "CPU D410", 100000 },
201 { "CPU D425", 100000 },
202 { "CPU D510", 100000 },
203 { "CPU D525", 100000 },
204 { "CPU N450", 100000 },
205 { "CPU N455", 100000 },
206 { "CPU N470", 100000 },
207 { "CPU N475", 100000 },
208 { "CPU 230", 100000 },
209 { "CPU 330", 125000 },
210};
211
194static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, 212static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
195 struct device *dev) 213 struct device *dev)
196{ 214{
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
202 int err; 220 int err;
203 u32 eax, edx; 221 u32 eax, edx;
204 struct pci_dev *host_bridge; 222 struct pci_dev *host_bridge;
223 int i;
224
225 /* explicit tjmax table entries override heuristics */
226 for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
227 if (strstr(c->x86_model_id, tjmax_table[i].id))
228 return tjmax_table[i].tjmax;
229 }
205 230
206 /* Early chips have no MSR for TjMax */ 231 /* Early chips have no MSR for TjMax */
207 232
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
210 235
211 /* Atom CPUs */ 236 /* Atom CPUs */
212 237
213 if (c->x86_model == 0x1c) { 238 if (c->x86_model == 0x1c || c->x86_model == 0x26
239 || c->x86_model == 0x27) {
214 usemsr_ee = 0; 240 usemsr_ee = 0;
215 241
216 host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); 242 host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
223 tjmax = 90000; 249 tjmax = 90000;
224 250
225 pci_dev_put(host_bridge); 251 pci_dev_put(host_bridge);
252 } else if (c->x86_model == 0x36) {
253 usemsr_ee = 0;
254 tjmax = 100000;
226 } 255 }
227 256
228 if (c->x86_model > 0xe && usemsr_ee) { 257 if (c->x86_model > 0xe && usemsr_ee) {
@@ -772,7 +801,7 @@ MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
772 801
773static int __init coretemp_init(void) 802static int __init coretemp_init(void)
774{ 803{
775 int i, err = -ENODEV; 804 int i, err;
776 805
777 /* 806 /*
778 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 807 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 9691f664c76e..e7d234b59312 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
451 data->fan_rpm_control = true; 451 data->fan_rpm_control = true;
452 break; 452 break;
453 default: 453 default:
454 mutex_unlock(&data->update_lock); 454 count = -EINVAL;
455 return -EINVAL; 455 goto err;
456 } 456 }
457 457
458 read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); 458 result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
459 if (result) {
460 count = result;
461 goto err;
462 }
459 463
460 if (data->fan_rpm_control) 464 if (data->fan_rpm_control)
461 conf_reg |= 0x80; 465 conf_reg |= 0x80;
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
463 conf_reg &= ~0x80; 467 conf_reg &= ~0x80;
464 468
465 i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg); 469 i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
466 470err:
467 mutex_unlock(&data->update_lock); 471 mutex_unlock(&data->update_lock);
468 return count; 472 return count;
469} 473}
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index beb2491db274..a0edd9854218 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x
37 This driver can also be built as a module. If so, the module 37 This driver can also be built as a module. If so, the module
38 will be called i2c-mux-pca954x. 38 will be called i2c-mux-pca954x.
39 39
40config I2C_MUX_PINCTRL
41 tristate "pinctrl-based I2C multiplexer"
42 depends on PINCTRL
43 help
44 If you say yes to this option, support will be included for an I2C
45 multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
46 This is useful for SoCs whose I2C module's signals can be routed to
47 different sets of pins at run-time.
48
49 This driver can also be built as a module. If so, the module will be
50 called pinctrl-i2cmux.
51
40endmenu 52endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 5826249b29ca..76da8692afff 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -4,5 +4,6 @@
4obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o 4obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
5obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o 5obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
6obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o 6obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
7obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
7 8
8ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG 9ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644
index 000000000000..46a669763476
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -0,0 +1,279 @@
1/*
2 * I2C multiplexer using pinctrl API
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/i2c.h>
20#include <linux/i2c-mux.h>
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/of_i2c.h>
24#include <linux/pinctrl/consumer.h>
25#include <linux/i2c-mux-pinctrl.h>
26#include <linux/platform_device.h>
27#include <linux/slab.h>
28
29struct i2c_mux_pinctrl {
30 struct device *dev;
31 struct i2c_mux_pinctrl_platform_data *pdata;
32 struct pinctrl *pinctrl;
33 struct pinctrl_state **states;
34 struct pinctrl_state *state_idle;
35 struct i2c_adapter *parent;
36 struct i2c_adapter **busses;
37};
38
39static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
40 u32 chan)
41{
42 struct i2c_mux_pinctrl *mux = data;
43
44 return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
45}
46
47static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
48 u32 chan)
49{
50 struct i2c_mux_pinctrl *mux = data;
51
52 return pinctrl_select_state(mux->pinctrl, mux->state_idle);
53}
54
55#ifdef CONFIG_OF
56static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
57 struct platform_device *pdev)
58{
59 struct device_node *np = pdev->dev.of_node;
60 int num_names, i, ret;
61 struct device_node *adapter_np;
62 struct i2c_adapter *adapter;
63
64 if (!np)
65 return 0;
66
67 mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
68 if (!mux->pdata) {
69 dev_err(mux->dev,
70 "Cannot allocate i2c_mux_pinctrl_platform_data\n");
71 return -ENOMEM;
72 }
73
74 num_names = of_property_count_strings(np, "pinctrl-names");
75 if (num_names < 0) {
76 dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
77 num_names);
78 return num_names;
79 }
80
81 mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
82 sizeof(*mux->pdata->pinctrl_states) * num_names,
83 GFP_KERNEL);
84 if (!mux->pdata->pinctrl_states) {
85 dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
86 return -ENOMEM;
87 }
88
89 for (i = 0; i < num_names; i++) {
90 ret = of_property_read_string_index(np, "pinctrl-names", i,
91 &mux->pdata->pinctrl_states[mux->pdata->bus_count]);
92 if (ret < 0) {
93 dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
94 ret);
95 return ret;
96 }
97 if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
98 "idle")) {
99 if (i != num_names - 1) {
100 dev_err(mux->dev, "idle state must be last\n");
101 return -EINVAL;
102 }
103 mux->pdata->pinctrl_state_idle = "idle";
104 } else {
105 mux->pdata->bus_count++;
106 }
107 }
108
109 adapter_np = of_parse_phandle(np, "i2c-parent", 0);
110 if (!adapter_np) {
111 dev_err(mux->dev, "Cannot parse i2c-parent\n");
112 return -ENODEV;
113 }
114 adapter = of_find_i2c_adapter_by_node(adapter_np);
115 if (!adapter) {
116 dev_err(mux->dev, "Cannot find parent bus\n");
117 return -ENODEV;
118 }
119 mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
120 put_device(&adapter->dev);
121
122 return 0;
123}
124#else
125static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
126 struct platform_device *pdev)
127{
128 return 0;
129}
130#endif
131
132static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
133{
134 struct i2c_mux_pinctrl *mux;
135 int (*deselect)(struct i2c_adapter *, void *, u32);
136 int i, ret;
137
138 mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
139 if (!mux) {
140 dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
141 ret = -ENOMEM;
142 goto err;
143 }
144 platform_set_drvdata(pdev, mux);
145
146 mux->dev = &pdev->dev;
147
148 mux->pdata = pdev->dev.platform_data;
149 if (!mux->pdata) {
150 ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
151 if (ret < 0)
152 goto err;
153 }
154 if (!mux->pdata) {
155 dev_err(&pdev->dev, "Missing platform data\n");
156 ret = -ENODEV;
157 goto err;
158 }
159
160 mux->states = devm_kzalloc(&pdev->dev,
161 sizeof(*mux->states) * mux->pdata->bus_count,
162 GFP_KERNEL);
163 if (!mux->states) {
164 dev_err(&pdev->dev, "Cannot allocate states\n");
165 ret = -ENOMEM;
166 goto err;
167 }
168
169 mux->busses = devm_kzalloc(&pdev->dev,
170 sizeof(mux->busses) * mux->pdata->bus_count,
171 GFP_KERNEL);
172 if (!mux->states) {
173 dev_err(&pdev->dev, "Cannot allocate busses\n");
174 ret = -ENOMEM;
175 goto err;
176 }
177
178 mux->pinctrl = devm_pinctrl_get(&pdev->dev);
179 if (IS_ERR(mux->pinctrl)) {
180 ret = PTR_ERR(mux->pinctrl);
181 dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
182 goto err;
183 }
184 for (i = 0; i < mux->pdata->bus_count; i++) {
185 mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
186 mux->pdata->pinctrl_states[i]);
187 if (IS_ERR(mux->states[i])) {
188 ret = PTR_ERR(mux->states[i]);
189 dev_err(&pdev->dev,
190 "Cannot look up pinctrl state %s: %d\n",
191 mux->pdata->pinctrl_states[i], ret);
192 goto err;
193 }
194 }
195 if (mux->pdata->pinctrl_state_idle) {
196 mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
197 mux->pdata->pinctrl_state_idle);
198 if (IS_ERR(mux->state_idle)) {
199 ret = PTR_ERR(mux->state_idle);
200 dev_err(&pdev->dev,
201 "Cannot look up pinctrl state %s: %d\n",
202 mux->pdata->pinctrl_state_idle, ret);
203 goto err;
204 }
205
206 deselect = i2c_mux_pinctrl_deselect;
207 } else {
208 deselect = NULL;
209 }
210
211 mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
212 if (!mux->parent) {
213 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
214 mux->pdata->parent_bus_num);
215 ret = -ENODEV;
216 goto err;
217 }
218
219 for (i = 0; i < mux->pdata->bus_count; i++) {
220 u32 bus = mux->pdata->base_bus_num ?
221 (mux->pdata->base_bus_num + i) : 0;
222
223 mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
224 mux, bus, i,
225 i2c_mux_pinctrl_select,
226 deselect);
227 if (!mux->busses[i]) {
228 ret = -ENODEV;
229 dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
230 goto err_del_adapter;
231 }
232 }
233
234 return 0;
235
236err_del_adapter:
237 for (; i > 0; i--)
238 i2c_del_mux_adapter(mux->busses[i - 1]);
239 i2c_put_adapter(mux->parent);
240err:
241 return ret;
242}
243
244static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
245{
246 struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
247 int i;
248
249 for (i = 0; i < mux->pdata->bus_count; i++)
250 i2c_del_mux_adapter(mux->busses[i]);
251
252 i2c_put_adapter(mux->parent);
253
254 return 0;
255}
256
257#ifdef CONFIG_OF
258static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
259 { .compatible = "i2c-mux-pinctrl", },
260 {},
261};
262MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
263#endif
264
265static struct platform_driver i2c_mux_pinctrl_driver = {
266 .driver = {
267 .name = "i2c-mux-pinctrl",
268 .owner = THIS_MODULE,
269 .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
270 },
271 .probe = i2c_mux_pinctrl_probe,
272 .remove = __devexit_p(i2c_mux_pinctrl_remove),
273};
274module_platform_driver(i2c_mux_pinctrl_driver);
275
276MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
277MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
278MODULE_LICENSE("GPL v2");
279MODULE_ALIAS("platform:i2c-mux-pinctrl");
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 8716066a2f2b..bcb507b0cfd4 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = {
236 */ 236 */
237static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) 237static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
238{ 238{
239 unsigned long cycle_time; 239 unsigned long cycle_time = 0;
240 int use_dma_info = 0; 240 int use_dma_info = 0;
241 const u8 xfer_mode = drive->dma_mode; 241 const u8 xfer_mode = drive->dma_mode;
242 242
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
271 271
272 ide_set_drivedata(drive, (void *)cycle_time); 272 ide_set_drivedata(drive, (void *)cycle_time);
273 273
274 printk("%s: %s selected (peak %dMB/s)\n", drive->name, 274 printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
275 ide_xfer_verbose(xfer_mode), 275 drive->name, ide_xfer_verbose(xfer_mode),
276 2000 / (unsigned long)ide_get_drivedata(drive)); 276 2000 / (cycle_time ? cycle_time : (unsigned long) -1));
277} 277}
278 278
279static const struct ide_port_ops icside_v6_port_ops = { 279static const struct ide_port_ops icside_v6_port_ops = {
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = {
375 .dma_test_irq = icside_dma_test_irq, 375 .dma_test_irq = icside_dma_test_irq,
376 .dma_lost_irq = ide_dma_lost_irq, 376 .dma_lost_irq = ide_dma_lost_irq,
377}; 377};
378#else
379#define icside_v6_dma_ops NULL
380#endif 378#endif
381 379
382static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) 380static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@@ -456,7 +454,6 @@ err_free:
456static const struct ide_port_info icside_v6_port_info __initdata = { 454static const struct ide_port_info icside_v6_port_info __initdata = {
457 .init_dma = icside_dma_off_init, 455 .init_dma = icside_dma_off_init,
458 .port_ops = &icside_v6_no_dma_port_ops, 456 .port_ops = &icside_v6_no_dma_port_ops,
459 .dma_ops = &icside_v6_dma_ops,
460 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, 457 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
461 .mwdma_mask = ATA_MWDMA2, 458 .mwdma_mask = ATA_MWDMA2,
462 .swdma_mask = ATA_SWDMA2, 459 .swdma_mask = ATA_SWDMA2,
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
518 515
519 ecard_set_drvdata(ec, state); 516 ecard_set_drvdata(ec, state);
520 517
518#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
521 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { 519 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
522 d.init_dma = icside_dma_init; 520 d.init_dma = icside_dma_init;
523 d.port_ops = &icside_v6_port_ops; 521 d.port_ops = &icside_v6_port_ops;
524 } else 522 d.dma_ops = &icside_v6_dma_ops;
525 d.dma_ops = NULL; 523 }
524#endif
526 525
527 ret = ide_host_register(host, &d, hws); 526 ret = ide_host_register(host, &d, hws);
528 if (ret) 527 if (ret)
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 28e344ea514c..f1e922e2479a 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
167{ 167{
168 int *is_kme = priv_data; 168 int *is_kme = priv_data;
169 169
170 if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { 170 if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
171 != IO_DATA_PATH_WIDTH_8) {
171 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 172 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
172 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; 173 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
173 } 174 }
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 56eecefcec75..2ec93da41e2c 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -8,8 +8,7 @@ menuconfig IIO
8 help 8 help
9 The industrial I/O subsystem provides a unified framework for 9 The industrial I/O subsystem provides a unified framework for
10 drivers for many different types of embedded sensors using a 10 drivers for many different types of embedded sensors using a
11 number of different physical interfaces (i2c, spi, etc). See 11 number of different physical interfaces (i2c, spi, etc).
12 Documentation/iio for more information.
13 12
14if IIO 13if IIO
15 14
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 1ddd8861c71b..4f947e4377ef 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -661,7 +661,6 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
661 * New channel registration method - relies on the fact a group does 661 * New channel registration method - relies on the fact a group does
662 * not need to be initialized if it is name is NULL. 662 * not need to be initialized if it is name is NULL.
663 */ 663 */
664 INIT_LIST_HEAD(&indio_dev->channel_attr_list);
665 if (indio_dev->channels) 664 if (indio_dev->channels)
666 for (i = 0; i < indio_dev->num_channels; i++) { 665 for (i = 0; i < indio_dev->num_channels; i++) {
667 ret = iio_device_add_channel_sysfs(indio_dev, 666 ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
725static void iio_dev_release(struct device *device) 724static void iio_dev_release(struct device *device)
726{ 725{
727 struct iio_dev *indio_dev = dev_to_iio_dev(device); 726 struct iio_dev *indio_dev = dev_to_iio_dev(device);
728 cdev_del(&indio_dev->chrdev); 727 if (indio_dev->chrdev.dev)
728 cdev_del(&indio_dev->chrdev);
729 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) 729 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
730 iio_device_unregister_trigger_consumer(indio_dev); 730 iio_device_unregister_trigger_consumer(indio_dev);
731 iio_device_unregister_eventset(indio_dev); 731 iio_device_unregister_eventset(indio_dev);
732 iio_device_unregister_sysfs(indio_dev); 732 iio_device_unregister_sysfs(indio_dev);
733 iio_device_unregister_debugfs(indio_dev); 733 iio_device_unregister_debugfs(indio_dev);
734
735 ida_simple_remove(&iio_ida, indio_dev->id);
736 kfree(indio_dev);
734} 737}
735 738
736static struct device_type iio_dev_type = { 739static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
761 dev_set_drvdata(&dev->dev, (void *)dev); 764 dev_set_drvdata(&dev->dev, (void *)dev);
762 mutex_init(&dev->mlock); 765 mutex_init(&dev->mlock);
763 mutex_init(&dev->info_exist_lock); 766 mutex_init(&dev->info_exist_lock);
767 INIT_LIST_HEAD(&dev->channel_attr_list);
764 768
765 dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL); 769 dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
766 if (dev->id < 0) { 770 if (dev->id < 0) {
@@ -778,10 +782,8 @@ EXPORT_SYMBOL(iio_device_alloc);
778 782
779void iio_device_free(struct iio_dev *dev) 783void iio_device_free(struct iio_dev *dev)
780{ 784{
781 if (dev) { 785 if (dev)
782 ida_simple_remove(&iio_ida, dev->id); 786 put_device(&dev->dev);
783 kfree(dev);
784 }
785} 787}
786EXPORT_SYMBOL(iio_device_free); 788EXPORT_SYMBOL(iio_device_free);
787 789
@@ -902,7 +904,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
902 mutex_lock(&indio_dev->info_exist_lock); 904 mutex_lock(&indio_dev->info_exist_lock);
903 indio_dev->info = NULL; 905 indio_dev->info = NULL;
904 mutex_unlock(&indio_dev->info_exist_lock); 906 mutex_unlock(&indio_dev->info_exist_lock);
905 device_unregister(&indio_dev->dev); 907 device_del(&indio_dev->dev);
906} 908}
907EXPORT_SYMBOL(iio_device_unregister); 909EXPORT_SYMBOL(iio_device_unregister);
908subsys_initcall(iio_init); 910subsys_initcall(iio_init);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 55d5642eb10a..2e826f9702c6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
1184 1184
1185static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) 1185static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
1186{ 1186{
1187 return (((ib_event->event == IB_CM_REQ_RECEIVED) || 1187 return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
1188 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 1188 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
1189 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 1189 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
1190 (id->qp_type == IB_QPT_UD)) || 1190 (id->qp_type == IB_QPT_UD)) ||
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 55ab284e22f2..b18870c455ad 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
1593 struct net_device *pdev; 1593 struct net_device *pdev;
1594 1594
1595 pdev = ip_dev_find(&init_net, peer_ip); 1595 pdev = ip_dev_find(&init_net, peer_ip);
1596 if (!pdev) {
1597 err = -ENODEV;
1598 goto out;
1599 }
1596 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1600 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1597 n, pdev, 0); 1601 n, pdev, 0);
1598 if (!ep->l2t) 1602 if (!ep->l2t)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ee1c577238f7..3530c41fcd1f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
140 props->max_mr_size = ~0ull; 140 props->max_mr_size = ~0ull;
141 props->page_size_cap = dev->dev->caps.page_size_cap; 141 props->page_size_cap = dev->dev->caps.page_size_cap;
142 props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; 142 props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
143 props->max_qp_wr = dev->dev->caps.max_wqes; 143 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
144 props->max_sge = min(dev->dev->caps.max_sq_sg, 144 props->max_sge = min(dev->dev->caps.max_sq_sg,
145 dev->dev->caps.max_rq_sg); 145 dev->dev->caps.max_rq_sg);
146 props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; 146 props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
@@ -1084,12 +1084,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1084 int total_eqs = 0; 1084 int total_eqs = 0;
1085 int i, j, eq; 1085 int i, j, eq;
1086 1086
1087 /* Init eq table */ 1087 /* Legacy mode or comp_pool is not large enough */
1088 ibdev->eq_table = NULL; 1088 if (dev->caps.comp_pool == 0 ||
1089 ibdev->eq_added = 0; 1089 dev->caps.num_ports > dev->caps.comp_pool)
1090
1091 /* Legacy mode? */
1092 if (dev->caps.comp_pool == 0)
1093 return; 1090 return;
1094 1091
1095 eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ 1092 eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
@@ -1135,7 +1132,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1135static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) 1132static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1136{ 1133{
1137 int i; 1134 int i;
1138 int total_eqs; 1135
1136 /* no additional eqs were added */
1137 if (!ibdev->eq_table)
1138 return;
1139 1139
1140 /* Reset the advertised EQ number */ 1140 /* Reset the advertised EQ number */
1141 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; 1141 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
@@ -1148,12 +1148,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1148 mlx4_release_eq(dev, ibdev->eq_table[i]); 1148 mlx4_release_eq(dev, ibdev->eq_table[i]);
1149 } 1149 }
1150 1150
1151 total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
1152 memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
1153 kfree(ibdev->eq_table); 1151 kfree(ibdev->eq_table);
1154
1155 ibdev->eq_table = NULL;
1156 ibdev->eq_added = 0;
1157} 1152}
1158 1153
1159static void *mlx4_ib_add(struct mlx4_dev *dev) 1154static void *mlx4_ib_add(struct mlx4_dev *dev)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e62297cc77cc..ff36655d23d3 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -44,6 +44,14 @@
44#include <linux/mlx4/device.h> 44#include <linux/mlx4/device.h>
45#include <linux/mlx4/doorbell.h> 45#include <linux/mlx4/doorbell.h>
46 46
47enum {
48 MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
49 MLX4_IB_MAX_HEADROOM = 2048
50};
51
52#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
53#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
54
47struct mlx4_ib_ucontext { 55struct mlx4_ib_ucontext {
48 struct ib_ucontext ibucontext; 56 struct ib_ucontext ibucontext;
49 struct mlx4_uar uar; 57 struct mlx4_uar uar;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ceb33327091a..8d4ed24aef93 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
310 int is_user, int has_rq, struct mlx4_ib_qp *qp) 310 int is_user, int has_rq, struct mlx4_ib_qp *qp)
311{ 311{
312 /* Sanity check RQ size before proceeding */ 312 /* Sanity check RQ size before proceeding */
313 if (cap->max_recv_wr > dev->dev->caps.max_wqes || 313 if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
314 cap->max_recv_sge > dev->dev->caps.max_rq_sg) 314 cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
315 return -EINVAL; 315 return -EINVAL;
316 316
317 if (!has_rq) { 317 if (!has_rq) {
@@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
329 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); 329 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
330 } 330 }
331 331
332 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; 332 /* leave userspace return values as they were, so as not to break ABI */
333 cap->max_recv_sge = qp->rq.max_gs; 333 if (is_user) {
334 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
335 cap->max_recv_sge = qp->rq.max_gs;
336 } else {
337 cap->max_recv_wr = qp->rq.max_post =
338 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
339 cap->max_recv_sge = min(qp->rq.max_gs,
340 min(dev->dev->caps.max_sq_sg,
341 dev->dev->caps.max_rq_sg));
342 }
334 343
335 return 0; 344 return 0;
336} 345}
@@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
341 int s; 350 int s;
342 351
343 /* Sanity check SQ size before proceeding */ 352 /* Sanity check SQ size before proceeding */
344 if (cap->max_send_wr > dev->dev->caps.max_wqes || 353 if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
345 cap->max_send_sge > dev->dev->caps.max_sq_sg || 354 cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
346 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + 355 cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
347 sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) 356 sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
348 return -EINVAL; 357 return -EINVAL;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 85a69c958559..48970af23679 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
61 u32 max_inline_data; 61 u32 max_inline_data;
62 int max_send_sge; 62 int max_send_sge;
63 int max_recv_sge; 63 int max_recv_sge;
64 int max_srq_sge;
64 int max_mr; 65 int max_mr;
65 u64 max_mr_size; 66 u64 max_mr_size;
66 u32 max_num_mr_pbl; 67 u32 max_num_mr_pbl;
@@ -231,7 +232,6 @@ struct ocrdma_qp_hwq_info {
231 u32 entry_size; 232 u32 entry_size;
232 u32 max_cnt; 233 u32 max_cnt;
233 u32 max_wqe_idx; 234 u32 max_wqe_idx;
234 u32 free_delta;
235 u16 dbid; /* qid, where to ring the doorbell. */ 235 u16 dbid; /* qid, where to ring the doorbell. */
236 u32 len; 236 u32 len;
237 dma_addr_t pa; 237 dma_addr_t pa;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index a411a4e3193d..517ab20b727c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp {
101 u32 rsvd1; 101 u32 rsvd1;
102 u32 num_wqe_allocated; 102 u32 num_wqe_allocated;
103 u32 num_rqe_allocated; 103 u32 num_rqe_allocated;
104 u32 free_wqe_delta;
105 u32 free_rqe_delta;
106 u32 db_sq_offset; 104 u32 db_sq_offset;
107 u32 db_rq_offset; 105 u32 db_rq_offset;
108 u32 db_shift; 106 u32 db_shift;
@@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp {
126 u32 db_rq_offset; 124 u32 db_rq_offset;
127 u32 db_shift; 125 u32 db_shift;
128 126
129 u32 free_rqe_delta; 127 u64 rsvd2;
130 u32 rsvd2;
131 u64 rsvd3; 128 u64 rsvd3;
132} __packed; 129} __packed;
133 130
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 9b204b1ba336..71942af4fce9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
732 break; 732 break;
733 case OCRDMA_SRQ_LIMIT_EVENT: 733 case OCRDMA_SRQ_LIMIT_EVENT:
734 ib_evt.element.srq = &qp->srq->ibsrq; 734 ib_evt.element.srq = &qp->srq->ibsrq;
735 ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; 735 ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
736 srq_event = 1; 736 srq_event = 1;
737 qp_event = 0; 737 qp_event = 0;
738 break; 738 break;
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
990 struct ocrdma_dev_attr *attr, 990 struct ocrdma_dev_attr *attr,
991 struct ocrdma_mbx_query_config *rsp) 991 struct ocrdma_mbx_query_config *rsp)
992{ 992{
993 int max_q_mem;
994
995 attr->max_pd = 993 attr->max_pd =
996 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> 994 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
997 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; 995 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
1004 attr->max_recv_sge = (rsp->max_write_send_sge & 1002 attr->max_recv_sge = (rsp->max_write_send_sge &
1005 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> 1003 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
1006 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; 1004 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
1005 attr->max_srq_sge = (rsp->max_srq_rqe_sge &
1006 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
1007 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
1007 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & 1008 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
1008 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> 1009 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
1009 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; 1010 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
1037 attr->max_inline_data = 1038 attr->max_inline_data =
1038 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + 1039 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
1039 sizeof(struct ocrdma_sge)); 1040 sizeof(struct ocrdma_sge));
1040 max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
1041 /* hw can queue one less then the configured size,
1042 * so publish less by one to stack.
1043 */
1044 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1041 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1045 dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
1046 attr->ird = 1; 1042 attr->ird = 1;
1047 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; 1043 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
1048 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; 1044 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
1049 } else 1045 }
1050 dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1; 1046 dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
1051 dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1; 1047 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
1048 dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
1049 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
1052} 1050}
1053 1051
1054static int ocrdma_check_fw_config(struct ocrdma_dev *dev, 1052static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
@@ -1990,19 +1988,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
1990 max_wqe_allocated = 1 << max_wqe_allocated; 1988 max_wqe_allocated = 1 << max_wqe_allocated;
1991 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); 1989 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
1992 1990
1993 if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1994 qp->sq.free_delta = 0;
1995 qp->rq.free_delta = 1;
1996 } else
1997 qp->sq.free_delta = 1;
1998
1999 qp->sq.max_cnt = max_wqe_allocated; 1991 qp->sq.max_cnt = max_wqe_allocated;
2000 qp->sq.max_wqe_idx = max_wqe_allocated - 1; 1992 qp->sq.max_wqe_idx = max_wqe_allocated - 1;
2001 1993
2002 if (!attrs->srq) { 1994 if (!attrs->srq) {
2003 qp->rq.max_cnt = max_rqe_allocated; 1995 qp->rq.max_cnt = max_rqe_allocated;
2004 qp->rq.max_wqe_idx = max_rqe_allocated - 1; 1996 qp->rq.max_wqe_idx = max_rqe_allocated - 1;
2005 qp->rq.free_delta = 1;
2006 } 1997 }
2007} 1998}
2008 1999
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index a20d16eaae71..b050e629e9c3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -26,7 +26,6 @@
26 *******************************************************************/ 26 *******************************************************************/
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/idr.h> 29#include <linux/idr.h>
31#include <rdma/ib_verbs.h> 30#include <rdma/ib_verbs.h>
32#include <rdma/ib_user_verbs.h> 31#include <rdma/ib_user_verbs.h>
@@ -98,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
98 sgid->raw[15] = mac_addr[5]; 97 sgid->raw[15] = mac_addr[5];
99} 98}
100 99
101static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 100static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
102 bool is_vlan, u16 vlan_id) 101 bool is_vlan, u16 vlan_id)
103{ 102{
104 int i; 103 int i;
105 bool found = false;
106 union ib_gid new_sgid; 104 union ib_gid new_sgid;
107 int free_idx = OCRDMA_MAX_SGID;
108 unsigned long flags; 105 unsigned long flags;
109 106
110 memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); 107 memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -116,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
116 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, 113 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
117 sizeof(union ib_gid))) { 114 sizeof(union ib_gid))) {
118 /* found free entry */ 115 /* found free entry */
119 if (!found) { 116 memcpy(&dev->sgid_tbl[i], &new_sgid,
120 free_idx = i; 117 sizeof(union ib_gid));
121 found = true; 118 spin_unlock_irqrestore(&dev->sgid_lock, flags);
122 break; 119 return true;
123 }
124 } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, 120 } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
125 sizeof(union ib_gid))) { 121 sizeof(union ib_gid))) {
126 /* entry already present, no addition is required. */ 122 /* entry already present, no addition is required. */
127 spin_unlock_irqrestore(&dev->sgid_lock, flags); 123 spin_unlock_irqrestore(&dev->sgid_lock, flags);
128 return; 124 return false;
129 } 125 }
130 } 126 }
131 /* if entry doesn't exist and if table has some space, add entry */
132 if (found)
133 memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
134 sizeof(union ib_gid));
135 spin_unlock_irqrestore(&dev->sgid_lock, flags); 127 spin_unlock_irqrestore(&dev->sgid_lock, flags);
128 return false;
136} 129}
137 130
138static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 131static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -168,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
168 ocrdma_get_guid(dev, &sgid->raw[8]); 161 ocrdma_get_guid(dev, &sgid->raw[8]);
169} 162}
170 163
171static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) 164#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
165static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
172{ 166{
173 struct net_device *netdev, *tmp; 167 struct net_device *netdev, *tmp;
174 u16 vlan_id; 168 u16 vlan_id;
@@ -176,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
176 170
177 netdev = dev->nic_info.netdev; 171 netdev = dev->nic_info.netdev;
178 172
179 ocrdma_add_default_sgid(dev);
180
181 rcu_read_lock(); 173 rcu_read_lock();
182 for_each_netdev_rcu(&init_net, tmp) { 174 for_each_netdev_rcu(&init_net, tmp) {
183 if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) { 175 if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -195,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
195 } 187 }
196 } 188 }
197 rcu_read_unlock(); 189 rcu_read_unlock();
190}
191#else
192static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
193{
194
195}
196#endif /* VLAN */
197
198static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
199{
200 ocrdma_add_default_sgid(dev);
201 ocrdma_add_vlan_sgids(dev);
198 return 0; 202 return 0;
199} 203}
200 204
201#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 205#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
206defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
202 207
203static int ocrdma_inet6addr_event(struct notifier_block *notifier, 208static int ocrdma_inet6addr_event(struct notifier_block *notifier,
204 unsigned long event, void *ptr) 209 unsigned long event, void *ptr)
@@ -209,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
209 struct ib_event gid_event; 214 struct ib_event gid_event;
210 struct ocrdma_dev *dev; 215 struct ocrdma_dev *dev;
211 bool found = false; 216 bool found = false;
217 bool updated = false;
212 bool is_vlan = false; 218 bool is_vlan = false;
213 u16 vid = 0; 219 u16 vid = 0;
214 220
@@ -234,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
234 mutex_lock(&dev->dev_lock); 240 mutex_lock(&dev->dev_lock);
235 switch (event) { 241 switch (event) {
236 case NETDEV_UP: 242 case NETDEV_UP:
237 ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); 243 updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
238 break; 244 break;
239 case NETDEV_DOWN: 245 case NETDEV_DOWN:
240 found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); 246 updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
241 if (found) {
242 /* found the matching entry, notify
243 * the consumers about it
244 */
245 gid_event.device = &dev->ibdev;
246 gid_event.element.port_num = 1;
247 gid_event.event = IB_EVENT_GID_CHANGE;
248 ib_dispatch_event(&gid_event);
249 }
250 break; 247 break;
251 default: 248 default:
252 break; 249 break;
253 } 250 }
251 if (updated) {
252 /* GID table updated, notify the consumers about it */
253 gid_event.device = &dev->ibdev;
254 gid_event.element.port_num = 1;
255 gid_event.event = IB_EVENT_GID_CHANGE;
256 ib_dispatch_event(&gid_event);
257 }
254 mutex_unlock(&dev->dev_lock); 258 mutex_unlock(&dev->dev_lock);
255 return NOTIFY_OK; 259 return NOTIFY_OK;
256} 260}
@@ -259,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
259 .notifier_call = ocrdma_inet6addr_event 263 .notifier_call = ocrdma_inet6addr_event
260}; 264};
261 265
262#endif /* IPV6 */ 266#endif /* IPV6 and VLAN */
263 267
264static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, 268static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
265 u8 port_num) 269 u8 port_num)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 7fd80cc0f037..c75cbdfa87e7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -418,6 +418,9 @@ enum {
418 418
419 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, 419 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
420 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, 420 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
421 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
422 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
423 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
421 424
422 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, 425 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
423 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, 426 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
458 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, 461 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
459 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0, 462 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0,
460 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF << 463 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF <<
461 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, 464 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
462 465
463 OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16, 466 OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16,
464 OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF << 467 OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF <<
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e9f74d1b48f6..2e2e7aecc990 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
53 53
54 dev = get_ocrdma_dev(ibdev); 54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid)); 55 memset(sgid, 0, sizeof(*sgid));
56 if (index > OCRDMA_MAX_SGID) 56 if (index >= OCRDMA_MAX_SGID)
57 return -EINVAL; 57 return -EINVAL;
58 58
59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
83 IB_DEVICE_SHUTDOWN_PORT | 83 IB_DEVICE_SHUTDOWN_PORT |
84 IB_DEVICE_SYS_IMAGE_GUID | 84 IB_DEVICE_SYS_IMAGE_GUID |
85 IB_DEVICE_LOCAL_DMA_LKEY; 85 IB_DEVICE_LOCAL_DMA_LKEY;
86 attr->max_sge = dev->attr.max_send_sge; 86 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
87 attr->max_sge_rd = dev->attr.max_send_sge; 87 attr->max_sge_rd = 0;
88 attr->max_cq = dev->attr.max_cq; 88 attr->max_cq = dev->attr.max_cq;
89 attr->max_cqe = dev->attr.max_cqe; 89 attr->max_cqe = dev->attr.max_cqe;
90 attr->max_mr = dev->attr.max_mr; 90 attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
97 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); 97 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
98 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; 98 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
99 attr->max_srq = (dev->attr.max_qp - 1); 99 attr->max_srq = (dev->attr.max_qp - 1);
100 attr->max_srq_sge = attr->max_sge; 100 attr->max_srq_sge = attr->max_srq_sge;
101 attr->max_srq_wr = dev->attr.max_rqe; 101 attr->max_srq_wr = dev->attr.max_rqe;
102 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; 102 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
103 attr->max_fast_reg_page_list_len = 0; 103 attr->max_fast_reg_page_list_len = 0;
@@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
940 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; 940 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
941 uresp.db_shift = 16; 941 uresp.db_shift = 16;
942 } 942 }
943 uresp.free_wqe_delta = qp->sq.free_delta;
944 uresp.free_rqe_delta = qp->rq.free_delta;
945 943
946 if (qp->dpp_enabled) { 944 if (qp->dpp_enabled) {
947 uresp.dpp_credit = dpp_credit_lmt; 945 uresp.dpp_credit = dpp_credit_lmt;
@@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1307 free_cnt = (q->max_cnt - q->head) + q->tail; 1305 free_cnt = (q->max_cnt - q->head) + q->tail;
1308 else 1306 else
1309 free_cnt = q->tail - q->head; 1307 free_cnt = q->tail - q->head;
1310 if (q->free_delta)
1311 free_cnt -= q->free_delta;
1312 return free_cnt; 1308 return free_cnt;
1313} 1309}
1314 1310
@@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
1501 (srq->pd->id * srq->dev->nic_info.db_page_size); 1497 (srq->pd->id * srq->dev->nic_info.db_page_size);
1502 uresp.db_page_size = srq->dev->nic_info.db_page_size; 1498 uresp.db_page_size = srq->dev->nic_info.db_page_size;
1503 uresp.num_rqe_allocated = srq->rq.max_cnt; 1499 uresp.num_rqe_allocated = srq->rq.max_cnt;
1504 uresp.free_rqe_delta = 1;
1505 if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1500 if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1506 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; 1501 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
1507 uresp.db_shift = 24; 1502 uresp.db_shift = 24;
@@ -2306,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2306 *stop = true; 2301 *stop = true;
2307 expand = false; 2302 expand = false;
2308 } 2303 }
2309 } else 2304 } else {
2305 *polled = true;
2310 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); 2306 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2307 }
2311 return expand; 2308 return expand;
2312} 2309}
2313 2310
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index e6483439f25f..633f03d80274 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -28,7 +28,6 @@
28#ifndef __OCRDMA_VERBS_H__ 28#ifndef __OCRDMA_VERBS_H__
29#define __OCRDMA_VERBS_H__ 29#define __OCRDMA_VERBS_H__
30 30
31#include <linux/version.h>
32int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, 31int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
33 struct ib_send_wr **bad_wr); 32 struct ib_send_wr **bad_wr);
34int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, 33int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d90a421e9cac..a2e418cba0ff 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
547 spin_unlock_irqrestore(&iommu->lock, flags); 547 spin_unlock_irqrestore(&iommu->lock, flags);
548} 548}
549 549
550static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) 550static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
551{ 551{
552 struct amd_iommu_fault fault; 552 struct amd_iommu_fault fault;
553 volatile u64 *raw;
554 int i;
555 553
556 INC_STATS_COUNTER(pri_requests); 554 INC_STATS_COUNTER(pri_requests);
557 555
558 raw = (u64 *)(iommu->ppr_log + head);
559
560 /*
561 * Hardware bug: Interrupt may arrive before the entry is written to
562 * memory. If this happens we need to wait for the entry to arrive.
563 */
564 for (i = 0; i < LOOP_TIMEOUT; ++i) {
565 if (PPR_REQ_TYPE(raw[0]) != 0)
566 break;
567 udelay(1);
568 }
569
570 if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { 556 if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
571 pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); 557 pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
572 return; 558 return;
@@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
578 fault.tag = PPR_TAG(raw[0]); 564 fault.tag = PPR_TAG(raw[0]);
579 fault.flags = PPR_FLAGS(raw[0]); 565 fault.flags = PPR_FLAGS(raw[0]);
580 566
581 /*
582 * To detect the hardware bug we need to clear the entry
583 * to back to zero.
584 */
585 raw[0] = raw[1] = 0;
586
587 atomic_notifier_call_chain(&ppr_notifier, 0, &fault); 567 atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
588} 568}
589 569
@@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
595 if (iommu->ppr_log == NULL) 575 if (iommu->ppr_log == NULL)
596 return; 576 return;
597 577
578 /* enable ppr interrupts again */
579 writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
580
598 spin_lock_irqsave(&iommu->lock, flags); 581 spin_lock_irqsave(&iommu->lock, flags);
599 582
600 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 583 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
601 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 584 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
602 585
603 while (head != tail) { 586 while (head != tail) {
587 volatile u64 *raw;
588 u64 entry[2];
589 int i;
604 590
605 /* Handle PPR entry */ 591 raw = (u64 *)(iommu->ppr_log + head);
606 iommu_handle_ppr_entry(iommu, head); 592
593 /*
594 * Hardware bug: Interrupt may arrive before the entry is
595 * written to memory. If this happens we need to wait for the
596 * entry to arrive.
597 */
598 for (i = 0; i < LOOP_TIMEOUT; ++i) {
599 if (PPR_REQ_TYPE(raw[0]) != 0)
600 break;
601 udelay(1);
602 }
603
604 /* Avoid memcpy function-call overhead */
605 entry[0] = raw[0];
606 entry[1] = raw[1];
607 607
608 /* Update and refresh ring-buffer state*/ 608 /*
609 * To detect the hardware bug we need to clear the entry
610 * back to zero.
611 */
612 raw[0] = raw[1] = 0UL;
613
614 /* Update head pointer of hardware ring-buffer */
609 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; 615 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
610 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 616 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
617
618 /*
619 * Release iommu->lock because ppr-handling might need to
620 * re-aquire it
621 */
622 spin_unlock_irqrestore(&iommu->lock, flags);
623
624 /* Handle PPR entry */
625 iommu_handle_ppr_entry(iommu, entry);
626
627 spin_lock_irqsave(&iommu->lock, flags);
628
629 /* Refresh ring-buffer information */
630 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
611 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 631 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
612 } 632 }
613 633
614 /* enable ppr interrupts again */
615 writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
616
617 spin_unlock_irqrestore(&iommu->lock, flags); 634 spin_unlock_irqrestore(&iommu->lock, flags);
618} 635}
619 636
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index c56790375e0f..542024ba6dba 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1029 if (!iommu->dev) 1029 if (!iommu->dev)
1030 return 1; 1030 return 1;
1031 1031
1032 iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
1033 PCI_DEVFN(0, 0));
1034
1032 iommu->cap_ptr = h->cap_ptr; 1035 iommu->cap_ptr = h->cap_ptr;
1033 iommu->pci_seg = h->pci_seg; 1036 iommu->pci_seg = h->pci_seg;
1034 iommu->mmio_phys = h->mmio_phys; 1037 iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1323{ 1326{
1324 int i, j; 1327 int i, j;
1325 u32 ioc_feature_control; 1328 u32 ioc_feature_control;
1326 struct pci_dev *pdev = NULL; 1329 struct pci_dev *pdev = iommu->root_pdev;
1327 1330
1328 /* RD890 BIOSes may not have completely reconfigured the iommu */ 1331 /* RD890 BIOSes may not have completely reconfigured the iommu */
1329 if (!is_rd890_iommu(iommu->dev)) 1332 if (!is_rd890_iommu(iommu->dev) || !pdev)
1330 return; 1333 return;
1331 1334
1332 /* 1335 /*
1333 * First, we need to ensure that the iommu is enabled. This is 1336 * First, we need to ensure that the iommu is enabled. This is
1334 * controlled by a register in the northbridge 1337 * controlled by a register in the northbridge
1335 */ 1338 */
1336 pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
1337
1338 if (!pdev)
1339 return;
1340 1339
1341 /* Select Northbridge indirect register 0x75 and enable writing */ 1340 /* Select Northbridge indirect register 0x75 and enable writing */
1342 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); 1341 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1346 if (!(ioc_feature_control & 0x1)) 1345 if (!(ioc_feature_control & 0x1))
1347 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); 1346 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
1348 1347
1349 pci_dev_put(pdev);
1350
1351 /* Restore the iommu BAR */ 1348 /* Restore the iommu BAR */
1352 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 1349 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1353 iommu->stored_addr_lo); 1350 iommu->stored_addr_lo);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 2452f3b71736..24355559a2ad 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -481,6 +481,9 @@ struct amd_iommu {
481 /* Pointer to PCI device of this IOMMU */ 481 /* Pointer to PCI device of this IOMMU */
482 struct pci_dev *dev; 482 struct pci_dev *dev;
483 483
484 /* Cache pdev to root device for resume quirks */
485 struct pci_dev *root_pdev;
486
484 /* physical address of MMIO space */ 487 /* physical address of MMIO space */
485 u64 mmio_phys; 488 u64 mmio_phys;
486 /* virtual address of MMIO space */ 489 /* virtual address of MMIO space */
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 04cb8c88d74b..12b2b55c519e 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -379,7 +379,7 @@ config LEDS_NETXBIG
379 379
380config LEDS_ASIC3 380config LEDS_ASIC3
381 bool "LED support for the HTC ASIC3" 381 bool "LED support for the HTC ASIC3"
382 depends on LEDS_CLASS 382 depends on LEDS_CLASS=y
383 depends on MFD_ASIC3 383 depends on MFD_ASIC3
384 default y 384 default y
385 help 385 help
@@ -390,7 +390,7 @@ config LEDS_ASIC3
390 390
391config LEDS_RENESAS_TPU 391config LEDS_RENESAS_TPU
392 bool "LED support for Renesas TPU" 392 bool "LED support for Renesas TPU"
393 depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO 393 depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
394 help 394 help
395 This option enables build of the LED TPU platform driver, 395 This option enables build of the LED TPU platform driver,
396 suitable to drive any TPU channel on newer Renesas SoCs. 396 suitable to drive any TPU channel on newer Renesas SoCs.
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 8ee92c81aec2..e663e6f413e9 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
29 led_cdev->brightness = led_cdev->brightness_get(led_cdev); 29 led_cdev->brightness = led_cdev->brightness_get(led_cdev);
30} 30}
31 31
32static ssize_t led_brightness_show(struct device *dev, 32static ssize_t led_brightness_show(struct device *dev,
33 struct device_attribute *attr, char *buf) 33 struct device_attribute *attr, char *buf)
34{ 34{
35 struct led_classdev *led_cdev = dev_get_drvdata(dev); 35 struct led_classdev *led_cdev = dev_get_drvdata(dev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index d6860043f6f9..d65353d8d3fc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
44 if (!led_cdev->blink_brightness) 44 if (!led_cdev->blink_brightness)
45 led_cdev->blink_brightness = led_cdev->max_brightness; 45 led_cdev->blink_brightness = led_cdev->max_brightness;
46 46
47 if (led_get_trigger_data(led_cdev) &&
48 delay_on == led_cdev->blink_delay_on &&
49 delay_off == led_cdev->blink_delay_off)
50 return;
51
52 led_stop_software_blink(led_cdev);
53
54 led_cdev->blink_delay_on = delay_on; 47 led_cdev->blink_delay_on = delay_on;
55 led_cdev->blink_delay_off = delay_off; 48 led_cdev->blink_delay_off = delay_off;
56 49
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 754f38f8a692..638dae048b4f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/time.h> 19#include <linux/time.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/delay.h>
21#include <scsi/scsi_dh.h> 22#include <scsi/scsi_dh.h>
22#include <linux/atomic.h> 23#include <linux/atomic.h>
23 24
@@ -61,11 +62,11 @@ struct multipath {
61 struct list_head list; 62 struct list_head list;
62 struct dm_target *ti; 63 struct dm_target *ti;
63 64
64 spinlock_t lock;
65
66 const char *hw_handler_name; 65 const char *hw_handler_name;
67 char *hw_handler_params; 66 char *hw_handler_params;
68 67
68 spinlock_t lock;
69
69 unsigned nr_priority_groups; 70 unsigned nr_priority_groups;
70 struct list_head priority_groups; 71 struct list_head priority_groups;
71 72
@@ -81,16 +82,17 @@ struct multipath {
81 struct priority_group *next_pg; /* Switch to this PG if set */ 82 struct priority_group *next_pg; /* Switch to this PG if set */
82 unsigned repeat_count; /* I/Os left before calling PS again */ 83 unsigned repeat_count; /* I/Os left before calling PS again */
83 84
84 unsigned queue_io; /* Must we queue all I/O? */ 85 unsigned queue_io:1; /* Must we queue all I/O? */
85 unsigned queue_if_no_path; /* Queue I/O if last path fails? */ 86 unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
86 unsigned saved_queue_if_no_path;/* Saved state during suspension */ 87 unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
88
87 unsigned pg_init_retries; /* Number of times to retry pg_init */ 89 unsigned pg_init_retries; /* Number of times to retry pg_init */
88 unsigned pg_init_count; /* Number of times pg_init called */ 90 unsigned pg_init_count; /* Number of times pg_init called */
89 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ 91 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
90 92
93 unsigned queue_size;
91 struct work_struct process_queued_ios; 94 struct work_struct process_queued_ios;
92 struct list_head queued_ios; 95 struct list_head queued_ios;
93 unsigned queue_size;
94 96
95 struct work_struct trigger_event; 97 struct work_struct trigger_event;
96 98
@@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
328 /* 330 /*
329 * Loop through priority groups until we find a valid path. 331 * Loop through priority groups until we find a valid path.
330 * First time we skip PGs marked 'bypassed'. 332 * First time we skip PGs marked 'bypassed'.
331 * Second time we only try the ones we skipped. 333 * Second time we only try the ones we skipped, but set
334 * pg_init_delay_retry so we do not hammer controllers.
332 */ 335 */
333 do { 336 do {
334 list_for_each_entry(pg, &m->priority_groups, list) { 337 list_for_each_entry(pg, &m->priority_groups, list) {
335 if (pg->bypassed == bypassed) 338 if (pg->bypassed == bypassed)
336 continue; 339 continue;
337 if (!__choose_path_in_pg(m, pg, nr_bytes)) 340 if (!__choose_path_in_pg(m, pg, nr_bytes)) {
341 if (!bypassed)
342 m->pg_init_delay_retry = 1;
338 return; 343 return;
344 }
339 } 345 }
340 } while (bypassed--); 346 } while (bypassed--);
341 347
@@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work)
481 487
482 spin_lock_irqsave(&m->lock, flags); 488 spin_lock_irqsave(&m->lock, flags);
483 489
484 if (!m->queue_size)
485 goto out;
486
487 if (!m->current_pgpath) 490 if (!m->current_pgpath)
488 __choose_pgpath(m, 0); 491 __choose_pgpath(m, 0);
489 492
@@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work)
496 if (m->pg_init_required && !m->pg_init_in_progress && pgpath) 499 if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
497 __pg_init_all_paths(m); 500 __pg_init_all_paths(m);
498 501
499out:
500 spin_unlock_irqrestore(&m->lock, flags); 502 spin_unlock_irqrestore(&m->lock, flags);
501 if (!must_queue) 503 if (!must_queue)
502 dispatch_queued_ios(m); 504 dispatch_queued_ios(m);
@@ -1517,11 +1519,16 @@ out:
1517static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, 1519static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1518 unsigned long arg) 1520 unsigned long arg)
1519{ 1521{
1520 struct multipath *m = (struct multipath *) ti->private; 1522 struct multipath *m = ti->private;
1521 struct block_device *bdev = NULL; 1523 struct block_device *bdev;
1522 fmode_t mode = 0; 1524 fmode_t mode;
1523 unsigned long flags; 1525 unsigned long flags;
1524 int r = 0; 1526 int r;
1527
1528again:
1529 bdev = NULL;
1530 mode = 0;
1531 r = 0;
1525 1532
1526 spin_lock_irqsave(&m->lock, flags); 1533 spin_lock_irqsave(&m->lock, flags);
1527 1534
@@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1546 if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) 1553 if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1547 r = scsi_verify_blk_ioctl(NULL, cmd); 1554 r = scsi_verify_blk_ioctl(NULL, cmd);
1548 1555
1556 if (r == -EAGAIN && !fatal_signal_pending(current)) {
1557 queue_work(kmultipathd, &m->process_queued_ios);
1558 msleep(10);
1559 goto again;
1560 }
1561
1549 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); 1562 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
1550} 1563}
1551 1564
@@ -1643,7 +1656,7 @@ out:
1643 *---------------------------------------------------------------*/ 1656 *---------------------------------------------------------------*/
1644static struct target_type multipath_target = { 1657static struct target_type multipath_target = {
1645 .name = "multipath", 1658 .name = "multipath",
1646 .version = {1, 3, 0}, 1659 .version = {1, 4, 0},
1647 .module = THIS_MODULE, 1660 .module = THIS_MODULE,
1648 .ctr = multipath_ctr, 1661 .ctr = multipath_ctr,
1649 .dtr = multipath_dtr, 1662 .dtr = multipath_dtr,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 737d38865b69..3e2907f0bc46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1082,12 +1082,89 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
1082 return 0; 1082 return 0;
1083} 1083}
1084 1084
1085static int __get_held_metadata_root(struct dm_pool_metadata *pmd, 1085static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1086 dm_block_t *result) 1086{
1087 int r, inc;
1088 struct thin_disk_superblock *disk_super;
1089 struct dm_block *copy, *sblock;
1090 dm_block_t held_root;
1091
1092 /*
1093 * Copy the superblock.
1094 */
1095 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
1096 r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
1097 &sb_validator, &copy, &inc);
1098 if (r)
1099 return r;
1100
1101 BUG_ON(!inc);
1102
1103 held_root = dm_block_location(copy);
1104 disk_super = dm_block_data(copy);
1105
1106 if (le64_to_cpu(disk_super->held_root)) {
1107 DMWARN("Pool metadata snapshot already exists: release this before taking another.");
1108
1109 dm_tm_dec(pmd->tm, held_root);
1110 dm_tm_unlock(pmd->tm, copy);
1111 pmd->need_commit = 1;
1112
1113 return -EBUSY;
1114 }
1115
1116 /*
1117 * Wipe the spacemap since we're not publishing this.
1118 */
1119 memset(&disk_super->data_space_map_root, 0,
1120 sizeof(disk_super->data_space_map_root));
1121 memset(&disk_super->metadata_space_map_root, 0,
1122 sizeof(disk_super->metadata_space_map_root));
1123
1124 /*
1125 * Increment the data structures that need to be preserved.
1126 */
1127 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
1128 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
1129 dm_tm_unlock(pmd->tm, copy);
1130
1131 /*
1132 * Write the held root into the superblock.
1133 */
1134 r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1135 &sb_validator, &sblock);
1136 if (r) {
1137 dm_tm_dec(pmd->tm, held_root);
1138 pmd->need_commit = 1;
1139 return r;
1140 }
1141
1142 disk_super = dm_block_data(sblock);
1143 disk_super->held_root = cpu_to_le64(held_root);
1144 dm_bm_unlock(sblock);
1145
1146 pmd->need_commit = 1;
1147
1148 return 0;
1149}
1150
1151int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
1152{
1153 int r;
1154
1155 down_write(&pmd->root_lock);
1156 r = __reserve_metadata_snap(pmd);
1157 up_write(&pmd->root_lock);
1158
1159 return r;
1160}
1161
1162static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1087{ 1163{
1088 int r; 1164 int r;
1089 struct thin_disk_superblock *disk_super; 1165 struct thin_disk_superblock *disk_super;
1090 struct dm_block *sblock; 1166 struct dm_block *sblock, *copy;
1167 dm_block_t held_root;
1091 1168
1092 r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, 1169 r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1093 &sb_validator, &sblock); 1170 &sb_validator, &sblock);
@@ -1095,18 +1172,65 @@ static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
1095 return r; 1172 return r;
1096 1173
1097 disk_super = dm_block_data(sblock); 1174 disk_super = dm_block_data(sblock);
1175 held_root = le64_to_cpu(disk_super->held_root);
1176 disk_super->held_root = cpu_to_le64(0);
1177 pmd->need_commit = 1;
1178
1179 dm_bm_unlock(sblock);
1180
1181 if (!held_root) {
1182 DMWARN("No pool metadata snapshot found: nothing to release.");
1183 return -EINVAL;
1184 }
1185
1186 r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
1187 if (r)
1188 return r;
1189
1190 disk_super = dm_block_data(copy);
1191 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
1192 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
1193 dm_sm_dec_block(pmd->metadata_sm, held_root);
1194
1195 return dm_tm_unlock(pmd->tm, copy);
1196}
1197
1198int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
1199{
1200 int r;
1201
1202 down_write(&pmd->root_lock);
1203 r = __release_metadata_snap(pmd);
1204 up_write(&pmd->root_lock);
1205
1206 return r;
1207}
1208
1209static int __get_metadata_snap(struct dm_pool_metadata *pmd,
1210 dm_block_t *result)
1211{
1212 int r;
1213 struct thin_disk_superblock *disk_super;
1214 struct dm_block *sblock;
1215
1216 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1217 &sb_validator, &sblock);
1218 if (r)
1219 return r;
1220
1221 disk_super = dm_block_data(sblock);
1098 *result = le64_to_cpu(disk_super->held_root); 1222 *result = le64_to_cpu(disk_super->held_root);
1099 1223
1100 return dm_bm_unlock(sblock); 1224 return dm_bm_unlock(sblock);
1101} 1225}
1102 1226
1103int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd, 1227int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
1104 dm_block_t *result) 1228 dm_block_t *result)
1105{ 1229{
1106 int r; 1230 int r;
1107 1231
1108 down_read(&pmd->root_lock); 1232 down_read(&pmd->root_lock);
1109 r = __get_held_metadata_root(pmd, result); 1233 r = __get_metadata_snap(pmd, result);
1110 up_read(&pmd->root_lock); 1234 up_read(&pmd->root_lock);
1111 1235
1112 return r; 1236 return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index ed4725e67c96..b88918ccdaf6 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
90 90
91/* 91/*
92 * Hold/get root for userspace transaction. 92 * Hold/get root for userspace transaction.
93 *
94 * The metadata snapshot is a copy of the current superblock (minus the
95 * space maps). Userland can access the data structures for READ
96 * operations only. A small performance hit is incurred by providing this
97 * copy of the metadata to userland due to extra copy-on-write operations
98 * on the metadata nodes. Release this as soon as you finish with it.
93 */ 99 */
94int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd); 100int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
101int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
95 102
96int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd, 103int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
97 dm_block_t *result); 104 dm_block_t *result);
98 105
99/* 106/*
100 * Actions on a single virtual device. 107 * Actions on a single virtual device.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index eb3d138ff55a..37fdaf81bd1f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -111,7 +111,7 @@ struct cell_key {
111 dm_block_t block; 111 dm_block_t block;
112}; 112};
113 113
114struct cell { 114struct dm_bio_prison_cell {
115 struct hlist_node list; 115 struct hlist_node list;
116 struct bio_prison *prison; 116 struct bio_prison *prison;
117 struct cell_key key; 117 struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
141 return n; 141 return n;
142} 142}
143 143
144static struct kmem_cache *_cell_cache;
145
144/* 146/*
145 * @nr_cells should be the number of cells you want in use _concurrently_. 147 * @nr_cells should be the number of cells you want in use _concurrently_.
146 * Don't confuse it with the number of distinct keys. 148 * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
157 return NULL; 159 return NULL;
158 160
159 spin_lock_init(&prison->lock); 161 spin_lock_init(&prison->lock);
160 prison->cell_pool = mempool_create_kmalloc_pool(nr_cells, 162 prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
161 sizeof(struct cell));
162 if (!prison->cell_pool) { 163 if (!prison->cell_pool) {
163 kfree(prison); 164 kfree(prison);
164 return NULL; 165 return NULL;
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
194 (lhs->block == rhs->block); 195 (lhs->block == rhs->block);
195} 196}
196 197
197static struct cell *__search_bucket(struct hlist_head *bucket, 198static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
198 struct cell_key *key) 199 struct cell_key *key)
199{ 200{
200 struct cell *cell; 201 struct dm_bio_prison_cell *cell;
201 struct hlist_node *tmp; 202 struct hlist_node *tmp;
202 203
203 hlist_for_each_entry(cell, tmp, bucket, list) 204 hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
214 * Returns 1 if the cell was already held, 0 if @inmate is the new holder. 215 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
215 */ 216 */
216static int bio_detain(struct bio_prison *prison, struct cell_key *key, 217static int bio_detain(struct bio_prison *prison, struct cell_key *key,
217 struct bio *inmate, struct cell **ref) 218 struct bio *inmate, struct dm_bio_prison_cell **ref)
218{ 219{
219 int r = 1; 220 int r = 1;
220 unsigned long flags; 221 unsigned long flags;
221 uint32_t hash = hash_key(prison, key); 222 uint32_t hash = hash_key(prison, key);
222 struct cell *cell, *cell2; 223 struct dm_bio_prison_cell *cell, *cell2;
223 224
224 BUG_ON(hash > prison->nr_buckets); 225 BUG_ON(hash > prison->nr_buckets);
225 226
@@ -273,7 +274,7 @@ out:
273/* 274/*
274 * @inmates must have been initialised prior to this call 275 * @inmates must have been initialised prior to this call
275 */ 276 */
276static void __cell_release(struct cell *cell, struct bio_list *inmates) 277static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
277{ 278{
278 struct bio_prison *prison = cell->prison; 279 struct bio_prison *prison = cell->prison;
279 280
@@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
287 mempool_free(cell, prison->cell_pool); 288 mempool_free(cell, prison->cell_pool);
288} 289}
289 290
290static void cell_release(struct cell *cell, struct bio_list *bios) 291static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
291{ 292{
292 unsigned long flags; 293 unsigned long flags;
293 struct bio_prison *prison = cell->prison; 294 struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
303 * bio may be in the cell. This function releases the cell, and also does 304 * bio may be in the cell. This function releases the cell, and also does
304 * a sanity check. 305 * a sanity check.
305 */ 306 */
306static void __cell_release_singleton(struct cell *cell, struct bio *bio) 307static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
307{ 308{
308 BUG_ON(cell->holder != bio); 309 BUG_ON(cell->holder != bio);
309 BUG_ON(!bio_list_empty(&cell->bios)); 310 BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
311 __cell_release(cell, NULL); 312 __cell_release(cell, NULL);
312} 313}
313 314
314static void cell_release_singleton(struct cell *cell, struct bio *bio) 315static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
315{ 316{
316 unsigned long flags; 317 unsigned long flags;
317 struct bio_prison *prison = cell->prison; 318 struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
324/* 325/*
325 * Sometimes we don't want the holder, just the additional bios. 326 * Sometimes we don't want the holder, just the additional bios.
326 */ 327 */
327static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates) 328static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
329 struct bio_list *inmates)
328{ 330{
329 struct bio_prison *prison = cell->prison; 331 struct bio_prison *prison = cell->prison;
330 332
@@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
334 mempool_free(cell, prison->cell_pool); 336 mempool_free(cell, prison->cell_pool);
335} 337}
336 338
337static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates) 339static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
340 struct bio_list *inmates)
338{ 341{
339 unsigned long flags; 342 unsigned long flags;
340 struct bio_prison *prison = cell->prison; 343 struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
344 spin_unlock_irqrestore(&prison->lock, flags); 347 spin_unlock_irqrestore(&prison->lock, flags);
345} 348}
346 349
347static void cell_error(struct cell *cell) 350static void cell_error(struct dm_bio_prison_cell *cell)
348{ 351{
349 struct bio_prison *prison = cell->prison; 352 struct bio_prison *prison = cell->prison;
350 struct bio_list bios; 353 struct bio_list bios;
@@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
491 * also provides the interface for creating and destroying internal 494 * also provides the interface for creating and destroying internal
492 * devices. 495 * devices.
493 */ 496 */
494struct new_mapping; 497struct dm_thin_new_mapping;
495 498
496struct pool_features { 499struct pool_features {
497 unsigned zero_new_blocks:1; 500 unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@ struct pool {
537 struct deferred_set shared_read_ds; 540 struct deferred_set shared_read_ds;
538 struct deferred_set all_io_ds; 541 struct deferred_set all_io_ds;
539 542
540 struct new_mapping *next_mapping; 543 struct dm_thin_new_mapping *next_mapping;
541 mempool_t *mapping_pool; 544 mempool_t *mapping_pool;
542 mempool_t *endio_hook_pool; 545 mempool_t *endio_hook_pool;
543}; 546};
@@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
630 633
631/*----------------------------------------------------------------*/ 634/*----------------------------------------------------------------*/
632 635
633struct endio_hook { 636struct dm_thin_endio_hook {
634 struct thin_c *tc; 637 struct thin_c *tc;
635 struct deferred_entry *shared_read_entry; 638 struct deferred_entry *shared_read_entry;
636 struct deferred_entry *all_io_entry; 639 struct deferred_entry *all_io_entry;
637 struct new_mapping *overwrite_mapping; 640 struct dm_thin_new_mapping *overwrite_mapping;
638}; 641};
639 642
640static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) 643static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
647 bio_list_init(master); 650 bio_list_init(master);
648 651
649 while ((bio = bio_list_pop(&bios))) { 652 while ((bio = bio_list_pop(&bios))) {
650 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 653 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
654
651 if (h->tc == tc) 655 if (h->tc == tc)
652 bio_endio(bio, DM_ENDIO_REQUEUE); 656 bio_endio(bio, DM_ENDIO_REQUEUE);
653 else 657 else
@@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool)
736/* 740/*
737 * Bio endio functions. 741 * Bio endio functions.
738 */ 742 */
739struct new_mapping { 743struct dm_thin_new_mapping {
740 struct list_head list; 744 struct list_head list;
741 745
742 unsigned quiesced:1; 746 unsigned quiesced:1;
@@ -746,7 +750,7 @@ struct new_mapping {
746 struct thin_c *tc; 750 struct thin_c *tc;
747 dm_block_t virt_block; 751 dm_block_t virt_block;
748 dm_block_t data_block; 752 dm_block_t data_block;
749 struct cell *cell, *cell2; 753 struct dm_bio_prison_cell *cell, *cell2;
750 int err; 754 int err;
751 755
752 /* 756 /*
@@ -759,7 +763,7 @@ struct new_mapping {
759 bio_end_io_t *saved_bi_end_io; 763 bio_end_io_t *saved_bi_end_io;
760}; 764};
761 765
762static void __maybe_add_mapping(struct new_mapping *m) 766static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
763{ 767{
764 struct pool *pool = m->tc->pool; 768 struct pool *pool = m->tc->pool;
765 769
@@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
772static void copy_complete(int read_err, unsigned long write_err, void *context) 776static void copy_complete(int read_err, unsigned long write_err, void *context)
773{ 777{
774 unsigned long flags; 778 unsigned long flags;
775 struct new_mapping *m = context; 779 struct dm_thin_new_mapping *m = context;
776 struct pool *pool = m->tc->pool; 780 struct pool *pool = m->tc->pool;
777 781
778 m->err = read_err || write_err ? -EIO : 0; 782 m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
786static void overwrite_endio(struct bio *bio, int err) 790static void overwrite_endio(struct bio *bio, int err)
787{ 791{
788 unsigned long flags; 792 unsigned long flags;
789 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 793 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
790 struct new_mapping *m = h->overwrite_mapping; 794 struct dm_thin_new_mapping *m = h->overwrite_mapping;
791 struct pool *pool = m->tc->pool; 795 struct pool *pool = m->tc->pool;
792 796
793 m->err = err; 797 m->err = err;
@@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
811/* 815/*
812 * This sends the bios in the cell back to the deferred_bios list. 816 * This sends the bios in the cell back to the deferred_bios list.
813 */ 817 */
814static void cell_defer(struct thin_c *tc, struct cell *cell, 818static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
815 dm_block_t data_block) 819 dm_block_t data_block)
816{ 820{
817 struct pool *pool = tc->pool; 821 struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
828 * Same as cell_defer above, except it omits one particular detainee, 832 * Same as cell_defer above, except it omits one particular detainee,
829 * a write bio that covers the block and has already been processed. 833 * a write bio that covers the block and has already been processed.
830 */ 834 */
831static void cell_defer_except(struct thin_c *tc, struct cell *cell) 835static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
832{ 836{
833 struct bio_list bios; 837 struct bio_list bios;
834 struct pool *pool = tc->pool; 838 struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
843 wake_worker(pool); 847 wake_worker(pool);
844} 848}
845 849
846static void process_prepared_mapping(struct new_mapping *m) 850static void process_prepared_mapping(struct dm_thin_new_mapping *m)
847{ 851{
848 struct thin_c *tc = m->tc; 852 struct thin_c *tc = m->tc;
849 struct bio *bio; 853 struct bio *bio;
@@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
886 mempool_free(m, tc->pool->mapping_pool); 890 mempool_free(m, tc->pool->mapping_pool);
887} 891}
888 892
889static void process_prepared_discard(struct new_mapping *m) 893static void process_prepared_discard(struct dm_thin_new_mapping *m)
890{ 894{
891 int r; 895 int r;
892 struct thin_c *tc = m->tc; 896 struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
909} 913}
910 914
911static void process_prepared(struct pool *pool, struct list_head *head, 915static void process_prepared(struct pool *pool, struct list_head *head,
912 void (*fn)(struct new_mapping *)) 916 void (*fn)(struct dm_thin_new_mapping *))
913{ 917{
914 unsigned long flags; 918 unsigned long flags;
915 struct list_head maps; 919 struct list_head maps;
916 struct new_mapping *m, *tmp; 920 struct dm_thin_new_mapping *m, *tmp;
917 921
918 INIT_LIST_HEAD(&maps); 922 INIT_LIST_HEAD(&maps);
919 spin_lock_irqsave(&pool->lock, flags); 923 spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
957 return pool->next_mapping ? 0 : -ENOMEM; 961 return pool->next_mapping ? 0 : -ENOMEM;
958} 962}
959 963
960static struct new_mapping *get_next_mapping(struct pool *pool) 964static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
961{ 965{
962 struct new_mapping *r = pool->next_mapping; 966 struct dm_thin_new_mapping *r = pool->next_mapping;
963 967
964 BUG_ON(!pool->next_mapping); 968 BUG_ON(!pool->next_mapping);
965 969
@@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
971static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, 975static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
972 struct dm_dev *origin, dm_block_t data_origin, 976 struct dm_dev *origin, dm_block_t data_origin,
973 dm_block_t data_dest, 977 dm_block_t data_dest,
974 struct cell *cell, struct bio *bio) 978 struct dm_bio_prison_cell *cell, struct bio *bio)
975{ 979{
976 int r; 980 int r;
977 struct pool *pool = tc->pool; 981 struct pool *pool = tc->pool;
978 struct new_mapping *m = get_next_mapping(pool); 982 struct dm_thin_new_mapping *m = get_next_mapping(pool);
979 983
980 INIT_LIST_HEAD(&m->list); 984 INIT_LIST_HEAD(&m->list);
981 m->quiesced = 0; 985 m->quiesced = 0;
@@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
997 * bio immediately. Otherwise we use kcopyd to clone the data first. 1001 * bio immediately. Otherwise we use kcopyd to clone the data first.
998 */ 1002 */
999 if (io_overwrites_block(pool, bio)) { 1003 if (io_overwrites_block(pool, bio)) {
1000 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1004 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1005
1001 h->overwrite_mapping = m; 1006 h->overwrite_mapping = m;
1002 m->bio = bio; 1007 m->bio = bio;
1003 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 1008 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1025 1030
1026static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, 1031static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1027 dm_block_t data_origin, dm_block_t data_dest, 1032 dm_block_t data_origin, dm_block_t data_dest,
1028 struct cell *cell, struct bio *bio) 1033 struct dm_bio_prison_cell *cell, struct bio *bio)
1029{ 1034{
1030 schedule_copy(tc, virt_block, tc->pool_dev, 1035 schedule_copy(tc, virt_block, tc->pool_dev,
1031 data_origin, data_dest, cell, bio); 1036 data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1033 1038
1034static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, 1039static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1035 dm_block_t data_dest, 1040 dm_block_t data_dest,
1036 struct cell *cell, struct bio *bio) 1041 struct dm_bio_prison_cell *cell, struct bio *bio)
1037{ 1042{
1038 schedule_copy(tc, virt_block, tc->origin_dev, 1043 schedule_copy(tc, virt_block, tc->origin_dev,
1039 virt_block, data_dest, cell, bio); 1044 virt_block, data_dest, cell, bio);
1040} 1045}
1041 1046
1042static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, 1047static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1043 dm_block_t data_block, struct cell *cell, 1048 dm_block_t data_block, struct dm_bio_prison_cell *cell,
1044 struct bio *bio) 1049 struct bio *bio)
1045{ 1050{
1046 struct pool *pool = tc->pool; 1051 struct pool *pool = tc->pool;
1047 struct new_mapping *m = get_next_mapping(pool); 1052 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1048 1053
1049 INIT_LIST_HEAD(&m->list); 1054 INIT_LIST_HEAD(&m->list);
1050 m->quiesced = 1; 1055 m->quiesced = 1;
@@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1065 process_prepared_mapping(m); 1070 process_prepared_mapping(m);
1066 1071
1067 else if (io_overwrites_block(pool, bio)) { 1072 else if (io_overwrites_block(pool, bio)) {
1068 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1073 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1074
1069 h->overwrite_mapping = m; 1075 h->overwrite_mapping = m;
1070 m->bio = bio; 1076 m->bio = bio;
1071 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 1077 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1072 remap_and_issue(tc, bio, data_block); 1078 remap_and_issue(tc, bio, data_block);
1073
1074 } else { 1079 } else {
1075 int r; 1080 int r;
1076 struct dm_io_region to; 1081 struct dm_io_region to;
@@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1155 */ 1160 */
1156static void retry_on_resume(struct bio *bio) 1161static void retry_on_resume(struct bio *bio)
1157{ 1162{
1158 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1163 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1159 struct thin_c *tc = h->tc; 1164 struct thin_c *tc = h->tc;
1160 struct pool *pool = tc->pool; 1165 struct pool *pool = tc->pool;
1161 unsigned long flags; 1166 unsigned long flags;
@@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
1165 spin_unlock_irqrestore(&pool->lock, flags); 1170 spin_unlock_irqrestore(&pool->lock, flags);
1166} 1171}
1167 1172
1168static void no_space(struct cell *cell) 1173static void no_space(struct dm_bio_prison_cell *cell)
1169{ 1174{
1170 struct bio *bio; 1175 struct bio *bio;
1171 struct bio_list bios; 1176 struct bio_list bios;
@@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1182 int r; 1187 int r;
1183 unsigned long flags; 1188 unsigned long flags;
1184 struct pool *pool = tc->pool; 1189 struct pool *pool = tc->pool;
1185 struct cell *cell, *cell2; 1190 struct dm_bio_prison_cell *cell, *cell2;
1186 struct cell_key key, key2; 1191 struct cell_key key, key2;
1187 dm_block_t block = get_bio_block(tc, bio); 1192 dm_block_t block = get_bio_block(tc, bio);
1188 struct dm_thin_lookup_result lookup_result; 1193 struct dm_thin_lookup_result lookup_result;
1189 struct new_mapping *m; 1194 struct dm_thin_new_mapping *m;
1190 1195
1191 build_virtual_key(tc->td, block, &key); 1196 build_virtual_key(tc->td, block, &key);
1192 if (bio_detain(tc->pool->prison, &key, bio, &cell)) 1197 if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1263,7 +1268,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1263static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, 1268static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1264 struct cell_key *key, 1269 struct cell_key *key,
1265 struct dm_thin_lookup_result *lookup_result, 1270 struct dm_thin_lookup_result *lookup_result,
1266 struct cell *cell) 1271 struct dm_bio_prison_cell *cell)
1267{ 1272{
1268 int r; 1273 int r;
1269 dm_block_t data_block; 1274 dm_block_t data_block;
@@ -1290,7 +1295,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1290 dm_block_t block, 1295 dm_block_t block,
1291 struct dm_thin_lookup_result *lookup_result) 1296 struct dm_thin_lookup_result *lookup_result)
1292{ 1297{
1293 struct cell *cell; 1298 struct dm_bio_prison_cell *cell;
1294 struct pool *pool = tc->pool; 1299 struct pool *pool = tc->pool;
1295 struct cell_key key; 1300 struct cell_key key;
1296 1301
@@ -1305,7 +1310,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1305 if (bio_data_dir(bio) == WRITE) 1310 if (bio_data_dir(bio) == WRITE)
1306 break_sharing(tc, bio, block, &key, lookup_result, cell); 1311 break_sharing(tc, bio, block, &key, lookup_result, cell);
1307 else { 1312 else {
1308 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1313 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1309 1314
1310 h->shared_read_entry = ds_inc(&pool->shared_read_ds); 1315 h->shared_read_entry = ds_inc(&pool->shared_read_ds);
1311 1316
@@ -1315,7 +1320,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1315} 1320}
1316 1321
1317static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, 1322static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1318 struct cell *cell) 1323 struct dm_bio_prison_cell *cell)
1319{ 1324{
1320 int r; 1325 int r;
1321 dm_block_t data_block; 1326 dm_block_t data_block;
@@ -1363,7 +1368,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1363{ 1368{
1364 int r; 1369 int r;
1365 dm_block_t block = get_bio_block(tc, bio); 1370 dm_block_t block = get_bio_block(tc, bio);
1366 struct cell *cell; 1371 struct dm_bio_prison_cell *cell;
1367 struct cell_key key; 1372 struct cell_key key;
1368 struct dm_thin_lookup_result lookup_result; 1373 struct dm_thin_lookup_result lookup_result;
1369 1374
@@ -1432,7 +1437,7 @@ static void process_deferred_bios(struct pool *pool)
1432 spin_unlock_irqrestore(&pool->lock, flags); 1437 spin_unlock_irqrestore(&pool->lock, flags);
1433 1438
1434 while ((bio = bio_list_pop(&bios))) { 1439 while ((bio = bio_list_pop(&bios))) {
1435 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1440 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1436 struct thin_c *tc = h->tc; 1441 struct thin_c *tc = h->tc;
1437 1442
1438 /* 1443 /*
@@ -1522,10 +1527,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1522 wake_worker(pool); 1527 wake_worker(pool);
1523} 1528}
1524 1529
1525static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio) 1530static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
1526{ 1531{
1527 struct pool *pool = tc->pool; 1532 struct pool *pool = tc->pool;
1528 struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO); 1533 struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1529 1534
1530 h->tc = tc; 1535 h->tc = tc;
1531 h->shared_read_entry = NULL; 1536 h->shared_read_entry = NULL;
@@ -1687,6 +1692,9 @@ static void __pool_destroy(struct pool *pool)
1687 kfree(pool); 1692 kfree(pool);
1688} 1693}
1689 1694
1695static struct kmem_cache *_new_mapping_cache;
1696static struct kmem_cache *_endio_hook_cache;
1697
1690static struct pool *pool_create(struct mapped_device *pool_md, 1698static struct pool *pool_create(struct mapped_device *pool_md,
1691 struct block_device *metadata_dev, 1699 struct block_device *metadata_dev,
1692 unsigned long block_size, char **error) 1700 unsigned long block_size, char **error)
@@ -1755,16 +1763,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
1755 ds_init(&pool->all_io_ds); 1763 ds_init(&pool->all_io_ds);
1756 1764
1757 pool->next_mapping = NULL; 1765 pool->next_mapping = NULL;
1758 pool->mapping_pool = 1766 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1759 mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping)); 1767 _new_mapping_cache);
1760 if (!pool->mapping_pool) { 1768 if (!pool->mapping_pool) {
1761 *error = "Error creating pool's mapping mempool"; 1769 *error = "Error creating pool's mapping mempool";
1762 err_p = ERR_PTR(-ENOMEM); 1770 err_p = ERR_PTR(-ENOMEM);
1763 goto bad_mapping_pool; 1771 goto bad_mapping_pool;
1764 } 1772 }
1765 1773
1766 pool->endio_hook_pool = 1774 pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
1767 mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook)); 1775 _endio_hook_cache);
1768 if (!pool->endio_hook_pool) { 1776 if (!pool->endio_hook_pool) {
1769 *error = "Error creating pool's endio_hook mempool"; 1777 *error = "Error creating pool's endio_hook mempool";
1770 err_p = ERR_PTR(-ENOMEM); 1778 err_p = ERR_PTR(-ENOMEM);
@@ -2276,6 +2284,36 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
2276 return 0; 2284 return 0;
2277} 2285}
2278 2286
2287static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2288{
2289 int r;
2290
2291 r = check_arg_count(argc, 1);
2292 if (r)
2293 return r;
2294
2295 r = dm_pool_reserve_metadata_snap(pool->pmd);
2296 if (r)
2297 DMWARN("reserve_metadata_snap message failed.");
2298
2299 return r;
2300}
2301
2302static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2303{
2304 int r;
2305
2306 r = check_arg_count(argc, 1);
2307 if (r)
2308 return r;
2309
2310 r = dm_pool_release_metadata_snap(pool->pmd);
2311 if (r)
2312 DMWARN("release_metadata_snap message failed.");
2313
2314 return r;
2315}
2316
2279/* 2317/*
2280 * Messages supported: 2318 * Messages supported:
2281 * create_thin <dev_id> 2319 * create_thin <dev_id>
@@ -2283,6 +2321,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
2283 * delete <dev_id> 2321 * delete <dev_id>
2284 * trim <dev_id> <new_size_in_sectors> 2322 * trim <dev_id> <new_size_in_sectors>
2285 * set_transaction_id <current_trans_id> <new_trans_id> 2323 * set_transaction_id <current_trans_id> <new_trans_id>
2324 * reserve_metadata_snap
2325 * release_metadata_snap
2286 */ 2326 */
2287static int pool_message(struct dm_target *ti, unsigned argc, char **argv) 2327static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2288{ 2328{
@@ -2302,6 +2342,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2302 else if (!strcasecmp(argv[0], "set_transaction_id")) 2342 else if (!strcasecmp(argv[0], "set_transaction_id"))
2303 r = process_set_transaction_id_mesg(argc, argv, pool); 2343 r = process_set_transaction_id_mesg(argc, argv, pool);
2304 2344
2345 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2346 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2347
2348 else if (!strcasecmp(argv[0], "release_metadata_snap"))
2349 r = process_release_metadata_snap_mesg(argc, argv, pool);
2350
2305 else 2351 else
2306 DMWARN("Unrecognised thin pool target message received: %s", argv[0]); 2352 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2307 2353
@@ -2361,7 +2407,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
2361 if (r) 2407 if (r)
2362 return r; 2408 return r;
2363 2409
2364 r = dm_pool_get_held_metadata_root(pool->pmd, &held_root); 2410 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2365 if (r) 2411 if (r)
2366 return r; 2412 return r;
2367 2413
@@ -2457,7 +2503,7 @@ static struct target_type pool_target = {
2457 .name = "thin-pool", 2503 .name = "thin-pool",
2458 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2504 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2459 DM_TARGET_IMMUTABLE, 2505 DM_TARGET_IMMUTABLE,
2460 .version = {1, 1, 0}, 2506 .version = {1, 2, 0},
2461 .module = THIS_MODULE, 2507 .module = THIS_MODULE,
2462 .ctr = pool_ctr, 2508 .ctr = pool_ctr,
2463 .dtr = pool_dtr, 2509 .dtr = pool_dtr,
@@ -2613,9 +2659,9 @@ static int thin_endio(struct dm_target *ti,
2613 union map_info *map_context) 2659 union map_info *map_context)
2614{ 2660{
2615 unsigned long flags; 2661 unsigned long flags;
2616 struct endio_hook *h = map_context->ptr; 2662 struct dm_thin_endio_hook *h = map_context->ptr;
2617 struct list_head work; 2663 struct list_head work;
2618 struct new_mapping *m, *tmp; 2664 struct dm_thin_new_mapping *m, *tmp;
2619 struct pool *pool = h->tc->pool; 2665 struct pool *pool = h->tc->pool;
2620 2666
2621 if (h->shared_read_entry) { 2667 if (h->shared_read_entry) {
@@ -2755,7 +2801,32 @@ static int __init dm_thin_init(void)
2755 2801
2756 r = dm_register_target(&pool_target); 2802 r = dm_register_target(&pool_target);
2757 if (r) 2803 if (r)
2758 dm_unregister_target(&thin_target); 2804 goto bad_pool_target;
2805
2806 r = -ENOMEM;
2807
2808 _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
2809 if (!_cell_cache)
2810 goto bad_cell_cache;
2811
2812 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
2813 if (!_new_mapping_cache)
2814 goto bad_new_mapping_cache;
2815
2816 _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
2817 if (!_endio_hook_cache)
2818 goto bad_endio_hook_cache;
2819
2820 return 0;
2821
2822bad_endio_hook_cache:
2823 kmem_cache_destroy(_new_mapping_cache);
2824bad_new_mapping_cache:
2825 kmem_cache_destroy(_cell_cache);
2826bad_cell_cache:
2827 dm_unregister_target(&pool_target);
2828bad_pool_target:
2829 dm_unregister_target(&thin_target);
2759 2830
2760 return r; 2831 return r;
2761} 2832}
@@ -2764,6 +2835,10 @@ static void dm_thin_exit(void)
2764{ 2835{
2765 dm_unregister_target(&thin_target); 2836 dm_unregister_target(&thin_target);
2766 dm_unregister_target(&pool_target); 2837 dm_unregister_target(&pool_target);
2838
2839 kmem_cache_destroy(_cell_cache);
2840 kmem_cache_destroy(_new_mapping_cache);
2841 kmem_cache_destroy(_endio_hook_cache);
2767} 2842}
2768 2843
2769module_init(dm_thin_init); 2844module_init(dm_thin_init);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 6f8d38747d7f..400fe144c0cd 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -249,6 +249,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
249 249
250 return r; 250 return r;
251} 251}
252EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
252 253
253int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b, 254int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
254 struct dm_block_validator *v, 255 struct dm_block_validator *v,
@@ -259,6 +260,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
259 260
260 return dm_bm_read_lock(tm->bm, b, v, blk); 261 return dm_bm_read_lock(tm->bm, b, v, blk);
261} 262}
263EXPORT_SYMBOL_GPL(dm_tm_read_lock);
262 264
263int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b) 265int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
264{ 266{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 835de7168cd3..a9c7981ddd24 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2550,6 +2550,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2550 err = -EINVAL; 2550 err = -EINVAL;
2551 spin_lock_init(&conf->device_lock); 2551 spin_lock_init(&conf->device_lock);
2552 rdev_for_each(rdev, mddev) { 2552 rdev_for_each(rdev, mddev) {
2553 struct request_queue *q;
2553 int disk_idx = rdev->raid_disk; 2554 int disk_idx = rdev->raid_disk;
2554 if (disk_idx >= mddev->raid_disks 2555 if (disk_idx >= mddev->raid_disks
2555 || disk_idx < 0) 2556 || disk_idx < 0)
@@ -2562,6 +2563,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2562 if (disk->rdev) 2563 if (disk->rdev)
2563 goto abort; 2564 goto abort;
2564 disk->rdev = rdev; 2565 disk->rdev = rdev;
2566 q = bdev_get_queue(rdev->bdev);
2567 if (q->merge_bvec_fn)
2568 mddev->merge_check_needed = 1;
2565 2569
2566 disk->head_position = 0; 2570 disk->head_position = 0;
2567 } 2571 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 987db37cb875..99ae6068e456 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3475,6 +3475,7 @@ static int run(struct mddev *mddev)
3475 3475
3476 rdev_for_each(rdev, mddev) { 3476 rdev_for_each(rdev, mddev) {
3477 long long diff; 3477 long long diff;
3478 struct request_queue *q;
3478 3479
3479 disk_idx = rdev->raid_disk; 3480 disk_idx = rdev->raid_disk;
3480 if (disk_idx < 0) 3481 if (disk_idx < 0)
@@ -3493,6 +3494,9 @@ static int run(struct mddev *mddev)
3493 goto out_free_conf; 3494 goto out_free_conf;
3494 disk->rdev = rdev; 3495 disk->rdev = rdev;
3495 } 3496 }
3497 q = bdev_get_queue(rdev->bdev);
3498 if (q->merge_bvec_fn)
3499 mddev->merge_check_needed = 1;
3496 diff = (rdev->new_data_offset - rdev->data_offset); 3500 diff = (rdev->new_data_offset - rdev->data_offset);
3497 if (!mddev->reshape_backwards) 3501 if (!mddev->reshape_backwards)
3498 diff = -diff; 3502 diff = -diff;
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index af2d9086d7e8..c370c2d87c17 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -29,6 +29,7 @@
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/slab.h>
32#include <linux/uaccess.h> 33#include <linux/uaccess.h>
33#include <linux/isa.h> 34#include <linux/isa.h>
34#include <asm/io.h> 35#include <asm/io.h>
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a5c591ffe395..d99db5623acf 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1653,7 +1653,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1653 unsigned long port; 1653 unsigned long port;
1654 u32 msize; 1654 u32 msize;
1655 u32 psize; 1655 u32 psize;
1656 u8 revision;
1657 int r = -ENODEV; 1656 int r = -ENODEV;
1658 struct pci_dev *pdev; 1657 struct pci_dev *pdev;
1659 1658
@@ -1670,8 +1669,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1670 return r; 1669 return r;
1671 } 1670 }
1672 1671
1673 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1674
1675 if (sizeof(dma_addr_t) > 4) { 1672 if (sizeof(dma_addr_t) > 4) {
1676 const uint64_t required_mask = dma_get_required_mask 1673 const uint64_t required_mask = dma_get_required_mask
1677 (&pdev->dev); 1674 (&pdev->dev);
@@ -1779,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1779 MPT_ADAPTER *ioc; 1776 MPT_ADAPTER *ioc;
1780 u8 cb_idx; 1777 u8 cb_idx;
1781 int r = -ENODEV; 1778 int r = -ENODEV;
1782 u8 revision;
1783 u8 pcixcmd; 1779 u8 pcixcmd;
1784 static int mpt_ids = 0; 1780 static int mpt_ids = 0;
1785#ifdef CONFIG_PROC_FS 1781#ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1887 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n", 1883 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
1888 ioc->name, &ioc->facts, &ioc->pfacts[0])); 1884 ioc->name, &ioc->facts, &ioc->pfacts[0]));
1889 1885
1890 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1886 mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
1891 mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name); 1887 ioc->prod_name);
1892 1888
1893 switch (pdev->device) 1889 switch (pdev->device)
1894 { 1890 {
@@ -1903,7 +1899,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1903 break; 1899 break;
1904 1900
1905 case MPI_MANUFACTPAGE_DEVICEID_FC929X: 1901 case MPI_MANUFACTPAGE_DEVICEID_FC929X:
1906 if (revision < XL_929) { 1902 if (pdev->revision < XL_929) {
1907 /* 929X Chip Fix. Set Split transactions level 1903 /* 929X Chip Fix. Set Split transactions level
1908 * for PCIX. Set MOST bits to zero. 1904 * for PCIX. Set MOST bits to zero.
1909 */ 1905 */
@@ -1934,7 +1930,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1934 /* 1030 Chip Fix. Disable Split transactions 1930 /* 1030 Chip Fix. Disable Split transactions
1935 * for PCIX. Set MOST bits to zero if Rev < C0( = 8). 1931 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
1936 */ 1932 */
1937 if (revision < C0_1030) { 1933 if (pdev->revision < C0_1030) {
1938 pci_read_config_byte(pdev, 0x6a, &pcixcmd); 1934 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1939 pcixcmd &= 0x8F; 1935 pcixcmd &= 0x8F;
1940 pci_write_config_byte(pdev, 0x6a, pcixcmd); 1936 pci_write_config_byte(pdev, 0x6a, pcixcmd);
@@ -6483,6 +6479,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
6483 printk(MYIOC_s_INFO_FMT "%s: host reset in" 6479 printk(MYIOC_s_INFO_FMT "%s: host reset in"
6484 " progress mpt_config timed out.!!\n", 6480 " progress mpt_config timed out.!!\n",
6485 __func__, ioc->name); 6481 __func__, ioc->name);
6482 mutex_unlock(&ioc->mptbase_cmds.mutex);
6486 return -EFAULT; 6483 return -EFAULT;
6487 } 6484 }
6488 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 6485 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 6e6e16aab9da..b383b6961e59 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1250,7 +1250,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1250 int iocnum; 1250 int iocnum;
1251 unsigned int port; 1251 unsigned int port;
1252 int cim_rev; 1252 int cim_rev;
1253 u8 revision;
1254 struct scsi_device *sdev; 1253 struct scsi_device *sdev;
1255 VirtDevice *vdevice; 1254 VirtDevice *vdevice;
1256 1255
@@ -1324,8 +1323,7 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1324 pdev = (struct pci_dev *) ioc->pcidev; 1323 pdev = (struct pci_dev *) ioc->pcidev;
1325 1324
1326 karg->pciId = pdev->device; 1325 karg->pciId = pdev->device;
1327 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1326 karg->hwRev = pdev->revision;
1328 karg->hwRev = revision;
1329 karg->subSystemDevice = pdev->subsystem_device; 1327 karg->subSystemDevice = pdev->subsystem_device;
1330 karg->subSystemVendor = pdev->subsystem_vendor; 1328 karg->subSystemVendor = pdev->subsystem_vendor;
1331 1329
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 671c8bc14bbc..50e83dc5dc49 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2735,6 +2735,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
2735 REGULATOR_SUPPLY("vcore", "uart2"), 2735 REGULATOR_SUPPLY("vcore", "uart2"),
2736 REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"), 2736 REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
2737 REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"), 2737 REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
2738 REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
2738}; 2739};
2739 2740
2740static struct regulator_consumer_supply db8500_vsmps2_consumers[] = { 2741static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 373f423b1181..947a06a1845f 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * License Terms: GNU General Public License, version 2 7 * License Terms: GNU General Public License, version 2
8 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson 8 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
9 * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics 9 * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
10 */ 10 */
11 11
12#include <linux/i2c.h> 12#include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index afd459013ecb..9edfe864cc05 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
4 * Copyright (C) ST Microelectronics SA 2011 4 * Copyright (C) ST Microelectronics SA 2011
5 * 5 *
6 * License Terms: GNU General Public License, version 2 6 * License Terms: GNU General Public License, version 2
7 * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics 7 * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
8 */ 8 */
9 9
10#include <linux/spi/spi.h> 10#include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
146 146
147MODULE_LICENSE("GPL v2"); 147MODULE_LICENSE("GPL v2");
148MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); 148MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
149MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 149MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 93936f1b75eb..23f5463d4cae 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
835 struct mei_cl *cl, 835 struct mei_cl *cl,
836 struct mei_io_list *cmpl_list) 836 struct mei_io_list *cmpl_list)
837{ 837{
838 if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) + 838 if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
839 sizeof(struct hbm_flow_control))) { 839 sizeof(struct hbm_flow_control))) {
840 /* return the cancel routine */ 840 /* return the cancel routine */
841 list_del(&cb_pos->cb_list); 841 list_del(&cb_pos->cb_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index c70333228337..7de13891e49e 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
982 err = request_threaded_irq(pdev->irq, 982 err = request_threaded_irq(pdev->irq,
983 NULL, 983 NULL,
984 mei_interrupt_thread_handler, 984 mei_interrupt_thread_handler,
985 0, mei_driver_name, dev); 985 IRQF_ONESHOT, mei_driver_name, dev);
986 else 986 else
987 err = request_threaded_irq(pdev->irq, 987 err = request_threaded_irq(pdev->irq,
988 mei_interrupt_quick_handler, 988 mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
992 if (err) { 992 if (err) {
993 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", 993 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
994 pdev->irq); 994 pdev->irq);
995 goto unmap_memory; 995 goto disable_msi;
996 } 996 }
997 INIT_DELAYED_WORK(&dev->timer_work, mei_timer); 997 INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
998 if (mei_hw_init(dev)) { 998 if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@ release_irq:
1023 mei_disable_interrupts(dev); 1023 mei_disable_interrupts(dev);
1024 flush_scheduled_work(); 1024 flush_scheduled_work();
1025 free_irq(pdev->irq, dev); 1025 free_irq(pdev->irq, dev);
1026disable_msi:
1026 pci_disable_msi(pdev); 1027 pci_disable_msi(pdev);
1027unmap_memory:
1028 pci_iounmap(pdev, dev->mem_addr); 1028 pci_iounmap(pdev, dev->mem_addr);
1029free_device: 1029free_device:
1030 kfree(dev); 1030 kfree(dev);
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
1101 1101
1102 pci_release_regions(pdev); 1102 pci_release_regions(pdev);
1103 pci_disable_device(pdev); 1103 pci_disable_device(pdev);
1104
1105 misc_deregister(&mei_misc_device);
1104} 1106}
1105#ifdef CONFIG_PM 1107#ifdef CONFIG_PM
1106static int mei_pci_suspend(struct device *device) 1108static int mei_pci_suspend(struct device *device)
@@ -1216,7 +1218,6 @@ module_init(mei_init_module);
1216 */ 1218 */
1217static void __exit mei_exit_module(void) 1219static void __exit mei_exit_module(void)
1218{ 1220{
1219 misc_deregister(&mei_misc_device);
1220 pci_unregister_driver(&mei_driver); 1221 pci_unregister_driver(&mei_driver);
1221 1222
1222 pr_debug("unloaded successfully.\n"); 1223 pr_debug("unloaded successfully.\n");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 6be5605707b4..e2ec0505eb5c 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = {
341}; 341};
342static const struct watchdog_info wd_info = { 342static const struct watchdog_info wd_info = {
343 .identity = INTEL_AMT_WATCHDOG_ID, 343 .identity = INTEL_AMT_WATCHDOG_ID,
344 .options = WDIOF_KEEPALIVEPING, 344 .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
345}; 345};
346 346
347static struct watchdog_device amt_wd_dev = { 347static struct watchdog_device amt_wd_dev = {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2d4a4b746750..258b203397aa 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1326,7 +1326,7 @@ static int mmc_suspend(struct mmc_host *host)
1326 if (!err) 1326 if (!err)
1327 mmc_card_set_sleep(host->card); 1327 mmc_card_set_sleep(host->card);
1328 } else if (!mmc_host_is_spi(host)) 1328 } else if (!mmc_host_is_spi(host))
1329 mmc_deselect_cards(host); 1329 err = mmc_deselect_cards(host);
1330 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1330 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
1331 mmc_release_host(host); 1331 mmc_release_host(host);
1332 1332
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c272c6868ecf..b2b43f624b9e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1075,16 +1075,18 @@ static void mmc_sd_detect(struct mmc_host *host)
1075 */ 1075 */
1076static int mmc_sd_suspend(struct mmc_host *host) 1076static int mmc_sd_suspend(struct mmc_host *host)
1077{ 1077{
1078 int err = 0;
1079
1078 BUG_ON(!host); 1080 BUG_ON(!host);
1079 BUG_ON(!host->card); 1081 BUG_ON(!host->card);
1080 1082
1081 mmc_claim_host(host); 1083 mmc_claim_host(host);
1082 if (!mmc_host_is_spi(host)) 1084 if (!mmc_host_is_spi(host))
1083 mmc_deselect_cards(host); 1085 err = mmc_deselect_cards(host);
1084 host->card->state &= ~MMC_STATE_HIGHSPEED; 1086 host->card->state &= ~MMC_STATE_HIGHSPEED;
1085 mmc_release_host(host); 1087 mmc_release_host(host);
1086 1088
1087 return 0; 1089 return err;
1088} 1090}
1089 1091
1090/* 1092/*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 13d0e95380ab..41c5fd8848f4 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card)
218 if (ret) 218 if (ret)
219 return ret; 219 return ret;
220 220
221 if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
222 pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
223 mmc_hostname(card->host), ctrl);
224
225 /* set as 4-bit bus width */
226 ctrl &= ~SDIO_BUS_WIDTH_MASK;
221 ctrl |= SDIO_BUS_WIDTH_4BIT; 227 ctrl |= SDIO_BUS_WIDTH_4BIT;
222 228
223 ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); 229 ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 787aba1682bb..ab56f7db5315 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -140,4 +140,18 @@
140#define atmci_writel(port,reg,value) \ 140#define atmci_writel(port,reg,value) \
141 __raw_writel((value), (port)->regs + reg) 141 __raw_writel((value), (port)->regs + reg)
142 142
143/*
144 * Fix sconfig's burst size according to atmel MCI. We need to convert them as:
145 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
146 *
147 * This can be done by finding most significant bit set.
148 */
149static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
150{
151 if (maxburst > 1)
152 return fls(maxburst) - 2;
153 else
154 return 0;
155}
156
143#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ 157#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 420aca642b14..f2c115e06438 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -910,6 +910,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
910 enum dma_data_direction direction; 910 enum dma_data_direction direction;
911 enum dma_transfer_direction slave_dirn; 911 enum dma_transfer_direction slave_dirn;
912 unsigned int sglen; 912 unsigned int sglen;
913 u32 maxburst;
913 u32 iflags; 914 u32 iflags;
914 915
915 data->error = -EINPROGRESS; 916 data->error = -EINPROGRESS;
@@ -943,17 +944,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
943 if (!chan) 944 if (!chan)
944 return -ENODEV; 945 return -ENODEV;
945 946
946 if (host->caps.has_dma)
947 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
948
949 if (data->flags & MMC_DATA_READ) { 947 if (data->flags & MMC_DATA_READ) {
950 direction = DMA_FROM_DEVICE; 948 direction = DMA_FROM_DEVICE;
951 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; 949 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
950 maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
952 } else { 951 } else {
953 direction = DMA_TO_DEVICE; 952 direction = DMA_TO_DEVICE;
954 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; 953 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
954 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
955 } 955 }
956 956
957 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
958
957 sglen = dma_map_sg(chan->device->dev, data->sg, 959 sglen = dma_map_sg(chan->device->dev, data->sg,
958 data->sg_len, direction); 960 data->sg_len, direction);
959 961
@@ -2314,6 +2316,8 @@ static int __init atmci_probe(struct platform_device *pdev)
2314 2316
2315 platform_set_drvdata(pdev, host); 2317 platform_set_drvdata(pdev, host);
2316 2318
2319 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2320
2317 /* We need at least one slot to succeed */ 2321 /* We need at least one slot to succeed */
2318 nr_slots = 0; 2322 nr_slots = 0;
2319 ret = -ENODEV; 2323 ret = -ENODEV;
@@ -2352,8 +2356,6 @@ static int __init atmci_probe(struct platform_device *pdev)
2352 } 2356 }
2353 } 2357 }
2354 2358
2355 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2356
2357 dev_info(&pdev->dev, 2359 dev_info(&pdev->dev,
2358 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", 2360 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2359 host->mapbase, irq, nr_slots); 2361 host->mapbase, irq, nr_slots);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 9bbf45f8c538..1ca5e72ceb65 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -418,6 +418,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
418 p->des3 = host->sg_dma; 418 p->des3 = host->sg_dma;
419 p->des0 = IDMAC_DES0_ER; 419 p->des0 = IDMAC_DES0_ER;
420 420
421 mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
422
421 /* Mask out interrupts - get Tx & Rx complete only */ 423 /* Mask out interrupts - get Tx & Rx complete only */
422 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | 424 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
423 SDMMC_IDMAC_INT_TI); 425 SDMMC_IDMAC_INT_TI);
@@ -615,14 +617,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
615 u32 div; 617 u32 div;
616 618
617 if (slot->clock != host->current_speed) { 619 if (slot->clock != host->current_speed) {
618 if (host->bus_hz % slot->clock) 620 div = host->bus_hz / slot->clock;
621 if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
619 /* 622 /*
620 * move the + 1 after the divide to prevent 623 * move the + 1 after the divide to prevent
621 * over-clocking the card. 624 * over-clocking the card.
622 */ 625 */
623 div = ((host->bus_hz / slot->clock) >> 1) + 1; 626 div += 1;
624 else 627
625 div = (host->bus_hz / slot->clock) >> 1; 628 div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
626 629
627 dev_info(&slot->mmc->class_dev, 630 dev_info(&slot->mmc->class_dev,
628 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" 631 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -939,8 +942,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
939 mdelay(20); 942 mdelay(20);
940 943
941 if (cmd->data) { 944 if (cmd->data) {
942 host->data = NULL;
943 dw_mci_stop_dma(host); 945 dw_mci_stop_dma(host);
946 host->data = NULL;
944 } 947 }
945 } 948 }
946} 949}
@@ -1623,7 +1626,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1623 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 1626 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1624 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); 1627 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1625 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 1628 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1626 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1627 host->dma_ops->complete(host); 1629 host->dma_ops->complete(host);
1628 } 1630 }
1629#endif 1631#endif
@@ -1725,7 +1727,8 @@ static void dw_mci_work_routine_card(struct work_struct *work)
1725 1727
1726#ifdef CONFIG_MMC_DW_IDMAC 1728#ifdef CONFIG_MMC_DW_IDMAC
1727 ctrl = mci_readl(host, BMOD); 1729 ctrl = mci_readl(host, BMOD);
1728 ctrl |= 0x01; /* Software reset of DMA */ 1730 /* Software reset of DMA */
1731 ctrl |= SDMMC_IDMAC_SWRESET;
1729 mci_writel(host, BMOD, ctrl); 1732 mci_writel(host, BMOD, ctrl);
1730#endif 1733#endif
1731 1734
@@ -1950,10 +1953,6 @@ int dw_mci_probe(struct dw_mci *host)
1950 spin_lock_init(&host->lock); 1953 spin_lock_init(&host->lock);
1951 INIT_LIST_HEAD(&host->queue); 1954 INIT_LIST_HEAD(&host->queue);
1952 1955
1953
1954 host->dma_ops = host->pdata->dma_ops;
1955 dw_mci_init_dma(host);
1956
1957 /* 1956 /*
1958 * Get the host data width - this assumes that HCON has been set with 1957 * Get the host data width - this assumes that HCON has been set with
1959 * the correct values. 1958 * the correct values.
@@ -1981,10 +1980,11 @@ int dw_mci_probe(struct dw_mci *host)
1981 } 1980 }
1982 1981
1983 /* Reset all blocks */ 1982 /* Reset all blocks */
1984 if (!mci_wait_reset(&host->dev, host)) { 1983 if (!mci_wait_reset(&host->dev, host))
1985 ret = -ENODEV; 1984 return -ENODEV;
1986 goto err_dmaunmap; 1985
1987 } 1986 host->dma_ops = host->pdata->dma_ops;
1987 dw_mci_init_dma(host);
1988 1988
1989 /* Clear the interrupts for the host controller */ 1989 /* Clear the interrupts for the host controller */
1990 mci_writel(host, RINTSTS, 0xFFFFFFFF); 1990 mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2170,14 +2170,14 @@ int dw_mci_resume(struct dw_mci *host)
2170 if (host->vmmc) 2170 if (host->vmmc)
2171 regulator_enable(host->vmmc); 2171 regulator_enable(host->vmmc);
2172 2172
2173 if (host->dma_ops->init)
2174 host->dma_ops->init(host);
2175
2176 if (!mci_wait_reset(&host->dev, host)) { 2173 if (!mci_wait_reset(&host->dev, host)) {
2177 ret = -ENODEV; 2174 ret = -ENODEV;
2178 return ret; 2175 return ret;
2179 } 2176 }
2180 2177
2178 if (host->dma_ops->init)
2179 host->dma_ops->init(host);
2180
2181 /* Restore the old value at FIFOTH register */ 2181 /* Restore the old value at FIFOTH register */
2182 mci_writel(host, FIFOTH, host->fifoth_val); 2182 mci_writel(host, FIFOTH, host->fifoth_val);
2183 2183
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f0fcce40cd8d..50ff19a62368 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1216,12 +1216,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np,
1216 int bus_width = 0; 1216 int bus_width = 0;
1217 1217
1218 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); 1218 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
1219 if (!pdata->gpio_wp)
1220 pdata->gpio_wp = -1;
1221
1222 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0); 1219 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
1223 if (!pdata->gpio_cd)
1224 pdata->gpio_cd = -1;
1225 1220
1226 if (of_get_property(np, "cd-inverted", NULL)) 1221 if (of_get_property(np, "cd-inverted", NULL))
1227 pdata->cd_invert = true; 1222 pdata->cd_invert = true;
@@ -1276,6 +1271,12 @@ static int __devinit mmci_probe(struct amba_device *dev,
1276 return -EINVAL; 1271 return -EINVAL;
1277 } 1272 }
1278 1273
1274 if (!plat) {
1275 plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
1276 if (!plat)
1277 return -ENOMEM;
1278 }
1279
1279 if (np) 1280 if (np)
1280 mmci_dt_populate_generic_pdata(np, plat); 1281 mmci_dt_populate_generic_pdata(np, plat);
1281 1282
@@ -1424,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
1424 writel(0, host->base + MMCIMASK1); 1425 writel(0, host->base + MMCIMASK1);
1425 writel(0xfff, host->base + MMCICLEAR); 1426 writel(0xfff, host->base + MMCICLEAR);
1426 1427
1428 if (plat->gpio_cd == -EPROBE_DEFER) {
1429 ret = -EPROBE_DEFER;
1430 goto err_gpio_cd;
1431 }
1427 if (gpio_is_valid(plat->gpio_cd)) { 1432 if (gpio_is_valid(plat->gpio_cd)) {
1428 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); 1433 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
1429 if (ret == 0) 1434 if (ret == 0)
@@ -1447,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
1447 if (ret >= 0) 1452 if (ret >= 0)
1448 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); 1453 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
1449 } 1454 }
1455 if (plat->gpio_wp == -EPROBE_DEFER) {
1456 ret = -EPROBE_DEFER;
1457 goto err_gpio_wp;
1458 }
1450 if (gpio_is_valid(plat->gpio_wp)) { 1459 if (gpio_is_valid(plat->gpio_wp)) {
1451 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); 1460 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
1452 if (ret == 0) 1461 if (ret == 0)
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 34a90266ab11..277161d279b8 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -894,8 +894,8 @@ static struct platform_driver mxs_mmc_driver = {
894 .owner = THIS_MODULE, 894 .owner = THIS_MODULE,
895#ifdef CONFIG_PM 895#ifdef CONFIG_PM
896 .pm = &mxs_mmc_pm_ops, 896 .pm = &mxs_mmc_pm_ops,
897 .of_match_table = mxs_mmc_dt_ids,
898#endif 897#endif
898 .of_match_table = mxs_mmc_dt_ids,
899 }, 899 },
900}; 900};
901 901
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 552196c764d4..3e8dcf8d2e05 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
1300 .set_ios = mmc_omap_set_ios, 1300 .set_ios = mmc_omap_set_ios,
1301}; 1301};
1302 1302
1303static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) 1303static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1304{ 1304{
1305 struct mmc_omap_slot *slot = NULL; 1305 struct mmc_omap_slot *slot = NULL;
1306 struct mmc_host *mmc; 1306 struct mmc_host *mmc;
@@ -1485,24 +1485,26 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
1485 } 1485 }
1486 1486
1487 host->nr_slots = pdata->nr_slots; 1487 host->nr_slots = pdata->nr_slots;
1488 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1489
1490 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1491 if (!host->mmc_omap_wq)
1492 goto err_plat_cleanup;
1493
1488 for (i = 0; i < pdata->nr_slots; i++) { 1494 for (i = 0; i < pdata->nr_slots; i++) {
1489 ret = mmc_omap_new_slot(host, i); 1495 ret = mmc_omap_new_slot(host, i);
1490 if (ret < 0) { 1496 if (ret < 0) {
1491 while (--i >= 0) 1497 while (--i >= 0)
1492 mmc_omap_remove_slot(host->slots[i]); 1498 mmc_omap_remove_slot(host->slots[i]);
1493 1499
1494 goto err_plat_cleanup; 1500 goto err_destroy_wq;
1495 } 1501 }
1496 } 1502 }
1497 1503
1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1499
1500 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1501 if (!host->mmc_omap_wq)
1502 goto err_plat_cleanup;
1503
1504 return 0; 1504 return 0;
1505 1505
1506err_destroy_wq:
1507 destroy_workqueue(host->mmc_omap_wq);
1506err_plat_cleanup: 1508err_plat_cleanup:
1507 if (pdata->cleanup) 1509 if (pdata->cleanup)
1508 pdata->cleanup(&pdev->dev); 1510 pdata->cleanup(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 55a164fcaa15..a50c205ea208 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
404 if (sc->ext_cd_irq && 404 if (sc->ext_cd_irq &&
405 request_threaded_irq(sc->ext_cd_irq, NULL, 405 request_threaded_irq(sc->ext_cd_irq, NULL,
406 sdhci_s3c_gpio_card_detect_thread, 406 sdhci_s3c_gpio_card_detect_thread,
407 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 407 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
408 dev_name(dev), sc) == 0) { 408 dev_name(dev), sc) == 0) {
409 int status = gpio_get_value(sc->ext_cd_gpio); 409 int status = gpio_get_value(sc->ext_cd_gpio);
410 if (pdata->ext_cd_gpio_invert) 410 if (pdata->ext_cd_gpio_invert)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 1fe32dfa7cd4..423da8194cd8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
4 * Support of SDHCI platform devices for spear soc family 4 * Support of SDHCI platform devices for spear soc family
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * Inspired by sdhci-pltfm.c 9 * Inspired by sdhci-pltfm.c
10 * 10 *
@@ -289,5 +289,5 @@ static struct platform_driver sdhci_driver = {
289module_platform_driver(sdhci_driver); 289module_platform_driver(sdhci_driver);
290 290
291MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); 291MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
292MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 292MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
293MODULE_LICENSE("GPL v2"); 293MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e626732aff77..f4b8b4db3a9a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
680 } 680 }
681 681
682 if (count >= 0xF) { 682 if (count >= 0xF) {
683 pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n", 683 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
684 mmc_hostname(host->mmc), count, cmd->opcode); 684 mmc_hostname(host->mmc), count, cmd->opcode);
685 count = 0xE; 685 count = 0xE;
686 } 686 }
687 687
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5760c1a4b3f6..27143e042af5 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -128,7 +128,7 @@ config MTD_AFS_PARTS
128 128
129config MTD_OF_PARTS 129config MTD_OF_PARTS
130 tristate "OpenFirmware partitioning information support" 130 tristate "OpenFirmware partitioning information support"
131 default Y 131 default y
132 depends on OF 132 depends on OF
133 help 133 help
134 This provides a partition parsing function which derives 134 This provides a partition parsing function which derives
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 608321ee056e..63d2a64331f7 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -4,7 +4,7 @@
4 * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org> 4 * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
5 * Mike Albon <malbon@openwrt.org> 5 * Mike Albon <malbon@openwrt.org>
6 * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net> 6 * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
7 * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com> 7 * Copyright © 2011-2012 Jonas Gorski <jonas.gorski@gmail.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -82,6 +82,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
82 int namelen = 0; 82 int namelen = 0;
83 int i; 83 int i;
84 u32 computed_crc; 84 u32 computed_crc;
85 bool rootfs_first = false;
85 86
86 if (bcm63xx_detect_cfe(master)) 87 if (bcm63xx_detect_cfe(master))
87 return -EINVAL; 88 return -EINVAL;
@@ -109,6 +110,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
109 char *boardid = &(buf->board_id[0]); 110 char *boardid = &(buf->board_id[0]);
110 char *tagversion = &(buf->tag_version[0]); 111 char *tagversion = &(buf->tag_version[0]);
111 112
113 sscanf(buf->flash_image_start, "%u", &rootfsaddr);
112 sscanf(buf->kernel_address, "%u", &kerneladdr); 114 sscanf(buf->kernel_address, "%u", &kerneladdr);
113 sscanf(buf->kernel_length, "%u", &kernellen); 115 sscanf(buf->kernel_length, "%u", &kernellen);
114 sscanf(buf->total_length, "%u", &totallen); 116 sscanf(buf->total_length, "%u", &totallen);
@@ -117,10 +119,19 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
117 tagversion, boardid); 119 tagversion, boardid);
118 120
119 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE; 121 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
120 rootfsaddr = kerneladdr + kernellen; 122 rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
121 spareaddr = roundup(totallen, master->erasesize) + cfelen; 123 spareaddr = roundup(totallen, master->erasesize) + cfelen;
122 sparelen = master->size - spareaddr - nvramlen; 124 sparelen = master->size - spareaddr - nvramlen;
123 rootfslen = spareaddr - rootfsaddr; 125
126 if (rootfsaddr < kerneladdr) {
127 /* default Broadcom layout */
128 rootfslen = kerneladdr - rootfsaddr;
129 rootfs_first = true;
130 } else {
131 /* OpenWrt layout */
132 rootfsaddr = kerneladdr + kernellen;
133 rootfslen = spareaddr - rootfsaddr;
134 }
124 } else { 135 } else {
125 pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n", 136 pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
126 buf->header_crc, computed_crc); 137 buf->header_crc, computed_crc);
@@ -156,18 +167,26 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
156 curpart++; 167 curpart++;
157 168
158 if (kernellen > 0) { 169 if (kernellen > 0) {
159 parts[curpart].name = "kernel"; 170 int kernelpart = curpart;
160 parts[curpart].offset = kerneladdr; 171
161 parts[curpart].size = kernellen; 172 if (rootfslen > 0 && rootfs_first)
173 kernelpart++;
174 parts[kernelpart].name = "kernel";
175 parts[kernelpart].offset = kerneladdr;
176 parts[kernelpart].size = kernellen;
162 curpart++; 177 curpart++;
163 } 178 }
164 179
165 if (rootfslen > 0) { 180 if (rootfslen > 0) {
166 parts[curpart].name = "rootfs"; 181 int rootfspart = curpart;
167 parts[curpart].offset = rootfsaddr; 182
168 parts[curpart].size = rootfslen; 183 if (kernellen > 0 && rootfs_first)
169 if (sparelen > 0) 184 rootfspart--;
170 parts[curpart].size += sparelen; 185 parts[rootfspart].name = "rootfs";
186 parts[rootfspart].offset = rootfsaddr;
187 parts[rootfspart].size = rootfslen;
188 if (sparelen > 0 && !rootfs_first)
189 parts[rootfspart].size += sparelen;
171 curpart++; 190 curpart++;
172 } 191 }
173 192
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d02592e6a0f0..22d0493a026f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -317,7 +317,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
317 317
318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
320 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name); 320 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
321 } 321 }
322} 322}
323 323
@@ -328,10 +328,23 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
328 328
329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
331 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name); 331 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
332 } 332 }
333} 333}
334 334
335static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
336{
337 struct map_info *map = mtd->priv;
338 struct cfi_private *cfi = map->fldrv_priv;
339
340 /*
341 * S29NS512P flash uses more than 8bits to report number of sectors,
342 * which is not permitted by CFI.
343 */
344 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
345 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
346}
347
335/* Used to fix CFI-Tables of chips without Extended Query Tables */ 348/* Used to fix CFI-Tables of chips without Extended Query Tables */
336static struct cfi_fixup cfi_nopri_fixup_table[] = { 349static struct cfi_fixup cfi_nopri_fixup_table[] = {
337 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 350 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -362,6 +375,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
362 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 375 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
363 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 376 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
364 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 377 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
378 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
365 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 379 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
366 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 380 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
367 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 381 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index ddf9ec6d9168..4558e0f4d07f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -70,7 +70,7 @@ struct cmdline_mtd_partition {
70/* mtdpart_setup() parses into here */ 70/* mtdpart_setup() parses into here */
71static struct cmdline_mtd_partition *partitions; 71static struct cmdline_mtd_partition *partitions;
72 72
73/* the command line passed to mtdpart_setupd() */ 73/* the command line passed to mtdpart_setup() */
74static char *cmdline; 74static char *cmdline;
75static int cmdline_parsed = 0; 75static int cmdline_parsed = 0;
76 76
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index a4a80b742e65..681e2ee0f2d6 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -52,8 +52,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
52 52
53 while (pages) { 53 while (pages) {
54 page = page_read(mapping, index); 54 page = page_read(mapping, index);
55 if (!page)
56 return -ENOMEM;
57 if (IS_ERR(page)) 55 if (IS_ERR(page))
58 return PTR_ERR(page); 56 return PTR_ERR(page);
59 57
@@ -112,8 +110,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
112 len = len - cpylen; 110 len = len - cpylen;
113 111
114 page = page_read(dev->blkdev->bd_inode->i_mapping, index); 112 page = page_read(dev->blkdev->bd_inode->i_mapping, index);
115 if (!page)
116 return -ENOMEM;
117 if (IS_ERR(page)) 113 if (IS_ERR(page))
118 return PTR_ERR(page); 114 return PTR_ERR(page);
119 115
@@ -148,8 +144,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
148 len = len - cpylen; 144 len = len - cpylen;
149 145
150 page = page_read(mapping, index); 146 page = page_read(mapping, index);
151 if (!page)
152 return -ENOMEM;
153 if (IS_ERR(page)) 147 if (IS_ERR(page))
154 return PTR_ERR(page); 148 return PTR_ERR(page);
155 149
@@ -271,7 +265,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
271 dev->mtd.flags = MTD_CAP_RAM; 265 dev->mtd.flags = MTD_CAP_RAM;
272 dev->mtd._erase = block2mtd_erase; 266 dev->mtd._erase = block2mtd_erase;
273 dev->mtd._write = block2mtd_write; 267 dev->mtd._write = block2mtd_write;
274 dev->mtd._writev = mtd_writev;
275 dev->mtd._sync = block2mtd_sync; 268 dev->mtd._sync = block2mtd_sync;
276 dev->mtd._read = block2mtd_read; 269 dev->mtd._read = block2mtd_read;
277 dev->mtd.priv = dev; 270 dev->mtd.priv = dev;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 50aa90aa7a7f..f70854d728fe 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -227,7 +227,7 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
227 u8 data8, *dst8; 227 u8 data8, *dst8;
228 228
229 doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len); 229 doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
230 cdr = len & 0x3; 230 cdr = len & 0x1;
231 len4 = len - cdr; 231 len4 = len - cdr;
232 232
233 if (first) 233 if (first)
@@ -732,12 +732,24 @@ err:
732 * @len: the number of bytes to be read (must be a multiple of 4) 732 * @len: the number of bytes to be read (must be a multiple of 4)
733 * @buf: the buffer to be filled in (or NULL is forget bytes) 733 * @buf: the buffer to be filled in (or NULL is forget bytes)
734 * @first: 1 if first time read, DOC_READADDRESS should be set 734 * @first: 1 if first time read, DOC_READADDRESS should be set
735 * @last_odd: 1 if last read ended up on an odd byte
736 *
737 * Reads bytes from a prepared page. There is a trickery here : if the last read
738 * ended up on an odd offset in the 1024 bytes double page, ie. between the 2
739 * planes, the first byte must be read apart. If a word (16bit) read was used,
740 * the read would return the byte of plane 2 as low *and* high endian, which
741 * will mess the read.
735 * 742 *
736 */ 743 */
737static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf, 744static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
738 int first) 745 int first, int last_odd)
739{ 746{
740 doc_read_data_area(docg3, buf, len, first); 747 if (last_odd && len > 0) {
748 doc_read_data_area(docg3, buf, 1, first);
749 doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
750 } else {
751 doc_read_data_area(docg3, buf, len, first);
752 }
741 doc_delay(docg3, 2); 753 doc_delay(docg3, 2);
742 return len; 754 return len;
743} 755}
@@ -850,6 +862,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
850 u8 *buf = ops->datbuf; 862 u8 *buf = ops->datbuf;
851 size_t len, ooblen, nbdata, nboob; 863 size_t len, ooblen, nbdata, nboob;
852 u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1; 864 u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
865 int max_bitflips = 0;
853 866
854 if (buf) 867 if (buf)
855 len = ops->len; 868 len = ops->len;
@@ -876,7 +889,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
876 ret = 0; 889 ret = 0;
877 skip = from % DOC_LAYOUT_PAGE_SIZE; 890 skip = from % DOC_LAYOUT_PAGE_SIZE;
878 mutex_lock(&docg3->cascade->lock); 891 mutex_lock(&docg3->cascade->lock);
879 while (!ret && (len > 0 || ooblen > 0)) { 892 while (ret >= 0 && (len > 0 || ooblen > 0)) {
880 calc_block_sector(from - skip, &block0, &block1, &page, &ofs, 893 calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
881 docg3->reliable); 894 docg3->reliable);
882 nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip); 895 nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
@@ -887,20 +900,20 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
887 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); 900 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
888 if (ret < 0) 901 if (ret < 0)
889 goto err_in_read; 902 goto err_in_read;
890 ret = doc_read_page_getbytes(docg3, skip, NULL, 1); 903 ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
891 if (ret < skip) 904 if (ret < skip)
892 goto err_in_read; 905 goto err_in_read;
893 ret = doc_read_page_getbytes(docg3, nbdata, buf, 0); 906 ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
894 if (ret < nbdata) 907 if (ret < nbdata)
895 goto err_in_read; 908 goto err_in_read;
896 doc_read_page_getbytes(docg3, 909 doc_read_page_getbytes(docg3,
897 DOC_LAYOUT_PAGE_SIZE - nbdata - skip, 910 DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
898 NULL, 0); 911 NULL, 0, (skip + nbdata) % 2);
899 ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0); 912 ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
900 if (ret < nboob) 913 if (ret < nboob)
901 goto err_in_read; 914 goto err_in_read;
902 doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob, 915 doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
903 NULL, 0); 916 NULL, 0, nboob % 2);
904 917
905 doc_get_bch_hw_ecc(docg3, hwecc); 918 doc_get_bch_hw_ecc(docg3, hwecc);
906 eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); 919 eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
@@ -936,7 +949,8 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
936 } 949 }
937 if (ret > 0) { 950 if (ret > 0) {
938 mtd->ecc_stats.corrected += ret; 951 mtd->ecc_stats.corrected += ret;
939 ret = -EUCLEAN; 952 max_bitflips = max(max_bitflips, ret);
953 ret = max_bitflips;
940 } 954 }
941 } 955 }
942 956
@@ -1004,7 +1018,7 @@ static int doc_reload_bbt(struct docg3 *docg3)
1004 DOC_LAYOUT_PAGE_SIZE); 1018 DOC_LAYOUT_PAGE_SIZE);
1005 if (!ret) 1019 if (!ret)
1006 doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE, 1020 doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
1007 buf, 1); 1021 buf, 1, 0);
1008 buf += DOC_LAYOUT_PAGE_SIZE; 1022 buf += DOC_LAYOUT_PAGE_SIZE;
1009 } 1023 }
1010 doc_read_page_finish(docg3); 1024 doc_read_page_finish(docg3);
@@ -1064,10 +1078,10 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
1064 ret = doc_reset_seq(docg3); 1078 ret = doc_reset_seq(docg3);
1065 if (!ret) 1079 if (!ret)
1066 ret = doc_read_page_prepare(docg3, block0, block1, page, 1080 ret = doc_read_page_prepare(docg3, block0, block1, page,
1067 ofs + DOC_LAYOUT_WEAR_OFFSET); 1081 ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
1068 if (!ret) 1082 if (!ret)
1069 ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE, 1083 ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
1070 buf, 1); 1084 buf, 1, 0);
1071 doc_read_page_finish(docg3); 1085 doc_read_page_finish(docg3);
1072 1086
1073 if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK)) 1087 if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 1924d247c1cb..5d0d68c3fe27 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -639,12 +639,16 @@ static const struct spi_device_id m25p_ids[] = {
639 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, 639 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
640 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, 640 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
641 641
642 /* Everspin */
643 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
644
642 /* Intel/Numonyx -- xxxs33b */ 645 /* Intel/Numonyx -- xxxs33b */
643 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, 646 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
644 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, 647 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
645 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, 648 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
646 649
647 /* Macronix */ 650 /* Macronix */
651 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
648 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, 652 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
649 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, 653 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
650 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, 654 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
@@ -728,6 +732,7 @@ static const struct spi_device_id m25p_ids[] = {
728 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, 732 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
729 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 733 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
730 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 734 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
735 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
731 736
732 /* Catalyst / On Semiconductor -- non-JEDEC */ 737 /* Catalyst / On Semiconductor -- non-JEDEC */
733 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, 738 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 797d43cd3550..67960362681e 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -990,9 +990,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
990 goto err_clk; 990 goto err_clk;
991 } 991 }
992 992
993 ret = clk_enable(dev->clk); 993 ret = clk_prepare_enable(dev->clk);
994 if (ret) 994 if (ret)
995 goto err_clk_enable; 995 goto err_clk_prepare_enable;
996 996
997 ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev); 997 ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
998 if (ret) { 998 if (ret) {
@@ -1020,8 +1020,8 @@ err_bank_setup:
1020 free_irq(irq, dev); 1020 free_irq(irq, dev);
1021 platform_set_drvdata(pdev, NULL); 1021 platform_set_drvdata(pdev, NULL);
1022err_irq: 1022err_irq:
1023 clk_disable(dev->clk); 1023 clk_disable_unprepare(dev->clk);
1024err_clk_enable: 1024err_clk_prepare_enable:
1025 clk_put(dev->clk); 1025 clk_put(dev->clk);
1026err_clk: 1026err_clk:
1027 iounmap(dev->io_base); 1027 iounmap(dev->io_base);
@@ -1074,7 +1074,7 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
1074 irq = platform_get_irq(pdev, 0); 1074 irq = platform_get_irq(pdev, 0);
1075 free_irq(irq, dev); 1075 free_irq(irq, dev);
1076 1076
1077 clk_disable(dev->clk); 1077 clk_disable_unprepare(dev->clk);
1078 clk_put(dev->clk); 1078 clk_put(dev->clk);
1079 iounmap(dev->io_base); 1079 iounmap(dev->io_base);
1080 kfree(dev); 1080 kfree(dev);
@@ -1091,7 +1091,7 @@ int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
1091 struct spear_smi *dev = platform_get_drvdata(pdev); 1091 struct spear_smi *dev = platform_get_drvdata(pdev);
1092 1092
1093 if (dev && dev->clk) 1093 if (dev && dev->clk)
1094 clk_disable(dev->clk); 1094 clk_disable_unprepare(dev->clk);
1095 1095
1096 return 0; 1096 return 0;
1097} 1097}
@@ -1102,7 +1102,7 @@ int spear_smi_resume(struct platform_device *pdev)
1102 int ret = -EPERM; 1102 int ret = -EPERM;
1103 1103
1104 if (dev && dev->clk) 1104 if (dev && dev->clk)
1105 ret = clk_enable(dev->clk); 1105 ret = clk_prepare_enable(dev->clk);
1106 1106
1107 if (!ret) 1107 if (!ret)
1108 spear_smi_hw_init(dev); 1108 spear_smi_hw_init(dev);
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index dbfe17baf046..45abed67f1ef 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = {
57 57
58static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str) 58static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
59{ 59{
60 int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info); 60 int qinfo_lines = ARRAY_SIZE(qinfo_array);
61 int i; 61 int i;
62 int bankwidth = map_bankwidth(map) * 8; 62 int bankwidth = map_bankwidth(map) * 8;
63 int major, minor; 63 int major, minor;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8af67cfd671a..5ba2458e799a 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -224,7 +224,7 @@ config MTD_CK804XROM
224 224
225config MTD_SCB2_FLASH 225config MTD_SCB2_FLASH
226 tristate "BIOS flash chip on Intel SCB2 boards" 226 tristate "BIOS flash chip on Intel SCB2 boards"
227 depends on X86 && MTD_JEDECPROBE 227 depends on X86 && MTD_JEDECPROBE && PCI
228 help 228 help
229 Support for treating the BIOS flash chip on Intel SCB2 boards 229 Support for treating the BIOS flash chip on Intel SCB2 boards
230 as an MTD device - with this you can reprogram your BIOS. 230 as an MTD device - with this you can reprogram your BIOS.
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 92e1f41634c7..93f03175c82d 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -260,18 +260,7 @@ static struct pci_driver vr_nor_pci_driver = {
260 .id_table = vr_nor_pci_ids, 260 .id_table = vr_nor_pci_ids,
261}; 261};
262 262
263static int __init vr_nor_mtd_init(void) 263module_pci_driver(vr_nor_pci_driver);
264{
265 return pci_register_driver(&vr_nor_pci_driver);
266}
267
268static void __exit vr_nor_mtd_exit(void)
269{
270 pci_unregister_driver(&vr_nor_pci_driver);
271}
272
273module_init(vr_nor_mtd_init);
274module_exit(vr_nor_mtd_exit);
275 264
276MODULE_AUTHOR("Andy Lowe"); 265MODULE_AUTHOR("Andy Lowe");
277MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range"); 266MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1d005a3e9b41..f14ce0af763f 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -352,18 +352,7 @@ static struct pci_driver mtd_pci_driver = {
352 .id_table = mtd_pci_ids, 352 .id_table = mtd_pci_ids,
353}; 353};
354 354
355static int __init mtd_pci_maps_init(void) 355module_pci_driver(mtd_pci_driver);
356{
357 return pci_register_driver(&mtd_pci_driver);
358}
359
360static void __exit mtd_pci_maps_exit(void)
361{
362 pci_unregister_driver(&mtd_pci_driver);
363}
364
365module_init(mtd_pci_maps_init);
366module_exit(mtd_pci_maps_exit);
367 356
368MODULE_LICENSE("GPL"); 357MODULE_LICENSE("GPL");
369MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); 358MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 934a72c80078..9dcbc684abdb 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -234,20 +234,7 @@ static struct pci_driver scb2_flash_driver = {
234 .remove = __devexit_p(scb2_flash_remove), 234 .remove = __devexit_p(scb2_flash_remove),
235}; 235};
236 236
237static int __init 237module_pci_driver(scb2_flash_driver);
238scb2_flash_init(void)
239{
240 return pci_register_driver(&scb2_flash_driver);
241}
242
243static void __exit
244scb2_flash_exit(void)
245{
246 pci_unregister_driver(&scb2_flash_driver);
247}
248
249module_init(scb2_flash_init);
250module_exit(scb2_flash_exit);
251 238
252MODULE_LICENSE("GPL"); 239MODULE_LICENSE("GPL");
253MODULE_AUTHOR("Tim Hockin <thockin@sun.com>"); 240MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 71b0ba797912..e7534c82f93a 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -59,7 +59,7 @@ static struct mtd_partition bigflash_parts[] = {
59 } 59 }
60}; 60};
61 61
62static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL}; 62static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
63 63
64#define init_sbc82xx_one_flash(map, br, or) \ 64#define init_sbc82xx_one_flash(map, br, or) \
65do { \ 65do { \
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c837507dfb1c..575730744fdb 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -250,6 +250,43 @@ static ssize_t mtd_name_show(struct device *dev,
250} 250}
251static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL); 251static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
252 252
253static ssize_t mtd_ecc_strength_show(struct device *dev,
254 struct device_attribute *attr, char *buf)
255{
256 struct mtd_info *mtd = dev_get_drvdata(dev);
257
258 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
259}
260static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
261
262static ssize_t mtd_bitflip_threshold_show(struct device *dev,
263 struct device_attribute *attr,
264 char *buf)
265{
266 struct mtd_info *mtd = dev_get_drvdata(dev);
267
268 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
269}
270
271static ssize_t mtd_bitflip_threshold_store(struct device *dev,
272 struct device_attribute *attr,
273 const char *buf, size_t count)
274{
275 struct mtd_info *mtd = dev_get_drvdata(dev);
276 unsigned int bitflip_threshold;
277 int retval;
278
279 retval = kstrtouint(buf, 0, &bitflip_threshold);
280 if (retval)
281 return retval;
282
283 mtd->bitflip_threshold = bitflip_threshold;
284 return count;
285}
286static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
287 mtd_bitflip_threshold_show,
288 mtd_bitflip_threshold_store);
289
253static struct attribute *mtd_attrs[] = { 290static struct attribute *mtd_attrs[] = {
254 &dev_attr_type.attr, 291 &dev_attr_type.attr,
255 &dev_attr_flags.attr, 292 &dev_attr_flags.attr,
@@ -260,6 +297,8 @@ static struct attribute *mtd_attrs[] = {
260 &dev_attr_oobsize.attr, 297 &dev_attr_oobsize.attr,
261 &dev_attr_numeraseregions.attr, 298 &dev_attr_numeraseregions.attr,
262 &dev_attr_name.attr, 299 &dev_attr_name.attr,
300 &dev_attr_ecc_strength.attr,
301 &dev_attr_bitflip_threshold.attr,
263 NULL, 302 NULL,
264}; 303};
265 304
@@ -322,6 +361,10 @@ int add_mtd_device(struct mtd_info *mtd)
322 mtd->index = i; 361 mtd->index = i;
323 mtd->usecount = 0; 362 mtd->usecount = 0;
324 363
364 /* default value if not set by driver */
365 if (mtd->bitflip_threshold == 0)
366 mtd->bitflip_threshold = mtd->ecc_strength;
367
325 if (is_power_of_2(mtd->erasesize)) 368 if (is_power_of_2(mtd->erasesize))
326 mtd->erasesize_shift = ffs(mtd->erasesize) - 1; 369 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
327 else 370 else
@@ -757,12 +800,24 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
757int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, 800int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
758 u_char *buf) 801 u_char *buf)
759{ 802{
803 int ret_code;
760 *retlen = 0; 804 *retlen = 0;
761 if (from < 0 || from > mtd->size || len > mtd->size - from) 805 if (from < 0 || from > mtd->size || len > mtd->size - from)
762 return -EINVAL; 806 return -EINVAL;
763 if (!len) 807 if (!len)
764 return 0; 808 return 0;
765 return mtd->_read(mtd, from, len, retlen, buf); 809
810 /*
811 * In the absence of an error, drivers return a non-negative integer
812 * representing the maximum number of bitflips that were corrected on
813 * any one ecc region (if applicable; zero otherwise).
814 */
815 ret_code = mtd->_read(mtd, from, len, retlen, buf);
816 if (unlikely(ret_code < 0))
817 return ret_code;
818 if (mtd->ecc_strength == 0)
819 return 0; /* device lacks ecc */
820 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
766} 821}
767EXPORT_SYMBOL_GPL(mtd_read); 822EXPORT_SYMBOL_GPL(mtd_read);
768 823
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index ae36d7e1e913..551e316e4454 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt)
304} 304}
305 305
306static void mtdoops_do_dump(struct kmsg_dumper *dumper, 306static void mtdoops_do_dump(struct kmsg_dumper *dumper,
307 enum kmsg_dump_reason reason, const char *s1, unsigned long l1, 307 enum kmsg_dump_reason reason)
308 const char *s2, unsigned long l2)
309{ 308{
310 struct mtdoops_context *cxt = container_of(dumper, 309 struct mtdoops_context *cxt = container_of(dumper,
311 struct mtdoops_context, dump); 310 struct mtdoops_context, dump);
312 unsigned long s1_start, s2_start;
313 unsigned long l1_cpy, l2_cpy;
314 char *dst;
315
316 if (reason != KMSG_DUMP_OOPS &&
317 reason != KMSG_DUMP_PANIC)
318 return;
319 311
320 /* Only dump oopses if dump_oops is set */ 312 /* Only dump oopses if dump_oops is set */
321 if (reason == KMSG_DUMP_OOPS && !dump_oops) 313 if (reason == KMSG_DUMP_OOPS && !dump_oops)
322 return; 314 return;
323 315
324 dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */ 316 kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
325 l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE); 317 record_size - MTDOOPS_HEADER_SIZE, NULL);
326 l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
327
328 s2_start = l2 - l2_cpy;
329 s1_start = l1 - l1_cpy;
330
331 memcpy(dst, s1 + s1_start, l1_cpy);
332 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
333 318
334 /* Panics must be written immediately */ 319 /* Panics must be written immediately */
335 if (reason != KMSG_DUMP_OOPS) 320 if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
375 return; 360 return;
376 } 361 }
377 362
363 cxt->dump.max_reason = KMSG_DUMP_OOPS;
378 cxt->dump.dump = mtdoops_do_dump; 364 cxt->dump.dump = mtdoops_do_dump;
379 err = kmsg_dump_register(&cxt->dump); 365 err = kmsg_dump_register(&cxt->dump);
380 if (err) { 366 if (err) {
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 9651c06de0a9..d518e4db8a0b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -67,12 +67,12 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
67 stats = part->master->ecc_stats; 67 stats = part->master->ecc_stats;
68 res = part->master->_read(part->master, from + part->offset, len, 68 res = part->master->_read(part->master, from + part->offset, len,
69 retlen, buf); 69 retlen, buf);
70 if (unlikely(res)) { 70 if (unlikely(mtd_is_eccerr(res)))
71 if (mtd_is_bitflip(res)) 71 mtd->ecc_stats.failed +=
72 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; 72 part->master->ecc_stats.failed - stats.failed;
73 if (mtd_is_eccerr(res)) 73 else
74 mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; 74 mtd->ecc_stats.corrected +=
75 } 75 part->master->ecc_stats.corrected - stats.corrected;
76 return res; 76 return res;
77} 77}
78 78
@@ -517,6 +517,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
517 517
518 slave->mtd.ecclayout = master->ecclayout; 518 slave->mtd.ecclayout = master->ecclayout;
519 slave->mtd.ecc_strength = master->ecc_strength; 519 slave->mtd.ecc_strength = master->ecc_strength;
520 slave->mtd.bitflip_threshold = master->bitflip_threshold;
521
520 if (master->_block_isbad) { 522 if (master->_block_isbad) {
521 uint64_t offs = 0; 523 uint64_t offs = 0;
522 524
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7d17cecad69d..31bb7e5b504a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -115,6 +115,46 @@ config MTD_NAND_OMAP2
115 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4 115 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
116 platforms. 116 platforms.
117 117
118config MTD_NAND_OMAP_BCH
119 depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
120 bool "Enable support for hardware BCH error correction"
121 default n
122 select BCH
123 select BCH_CONST_PARAMS
124 help
125 Support for hardware BCH error correction.
126
127choice
128 prompt "BCH error correction capability"
129 depends on MTD_NAND_OMAP_BCH
130
131config MTD_NAND_OMAP_BCH8
132 bool "8 bits / 512 bytes (recommended)"
133 help
134 Support correcting up to 8 bitflips per 512-byte block.
135 This will use 13 bytes of spare area per 512 bytes of page data.
136 This is the recommended mode, as 4-bit mode does not work
137 on some OMAP3 revisions, due to a hardware bug.
138
139config MTD_NAND_OMAP_BCH4
140 bool "4 bits / 512 bytes"
141 help
142 Support correcting up to 4 bitflips per 512-byte block.
143 This will use 7 bytes of spare area per 512 bytes of page data.
144 Note that this mode does not work on some OMAP3 revisions, due to a
145 hardware bug. Please check your OMAP datasheet before selecting this
146 mode.
147
148endchoice
149
150if MTD_NAND_OMAP_BCH
151config BCH_CONST_M
152 default 13
153config BCH_CONST_T
154 default 4 if MTD_NAND_OMAP_BCH4
155 default 8 if MTD_NAND_OMAP_BCH8
156endif
157
118config MTD_NAND_IDS 158config MTD_NAND_IDS
119 tristate 159 tristate
120 160
@@ -440,7 +480,7 @@ config MTD_NAND_NANDSIM
440 480
441config MTD_NAND_GPMI_NAND 481config MTD_NAND_GPMI_NAND
442 bool "GPMI NAND Flash Controller driver" 482 bool "GPMI NAND Flash Controller driver"
443 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) 483 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
444 help 484 help
445 Enables NAND Flash support for IMX23 or IMX28. 485 Enables NAND Flash support for IMX23 or IMX28.
446 The GPMI controller is very powerful, with the help of BCH 486 The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 4f20e1d8bef1..60a0dfdb0808 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -414,7 +414,7 @@ static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
414 } 414 }
415 err = 0; 415 err = 0;
416 if (corrected) 416 if (corrected)
417 err = -EUCLEAN; 417 err = 1; /* return max_bitflips per ecc step */
418 if (uncorrected) 418 if (uncorrected)
419 err = -EBADMSG; 419 err = -EBADMSG;
420out: 420out:
@@ -446,7 +446,7 @@ static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
446 } 446 }
447 err = 0; 447 err = 0;
448 if (corrected) 448 if (corrected)
449 err = -EUCLEAN; 449 err = 1; /* return max_bitflips per ecc step */
450 if (uncorrected) 450 if (uncorrected)
451 err = -EBADMSG; 451 err = -EBADMSG;
452 return err; 452 return err;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2165576a1c67..97ac6712bb19 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -324,9 +324,10 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
324 * mtd: mtd info structure 324 * mtd: mtd info structure
325 * chip: nand chip info structure 325 * chip: nand chip info structure
326 * buf: buffer to store read data 326 * buf: buffer to store read data
327 * oob_required: caller expects OOB data read to chip->oob_poi
327 */ 328 */
328static int atmel_nand_read_page(struct mtd_info *mtd, 329static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
329 struct nand_chip *chip, uint8_t *buf, int page) 330 uint8_t *buf, int oob_required, int page)
330{ 331{
331 int eccsize = chip->ecc.size; 332 int eccsize = chip->ecc.size;
332 int eccbytes = chip->ecc.bytes; 333 int eccbytes = chip->ecc.bytes;
@@ -335,6 +336,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
335 uint8_t *oob = chip->oob_poi; 336 uint8_t *oob = chip->oob_poi;
336 uint8_t *ecc_pos; 337 uint8_t *ecc_pos;
337 int stat; 338 int stat;
339 unsigned int max_bitflips = 0;
338 340
339 /* 341 /*
340 * Errata: ALE is incorrectly wired up to the ECC controller 342 * Errata: ALE is incorrectly wired up to the ECC controller
@@ -371,10 +373,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
371 /* check if there's an error */ 373 /* check if there's an error */
372 stat = chip->ecc.correct(mtd, p, oob, NULL); 374 stat = chip->ecc.correct(mtd, p, oob, NULL);
373 375
374 if (stat < 0) 376 if (stat < 0) {
375 mtd->ecc_stats.failed++; 377 mtd->ecc_stats.failed++;
376 else 378 } else {
377 mtd->ecc_stats.corrected += stat; 379 mtd->ecc_stats.corrected += stat;
380 max_bitflips = max_t(unsigned int, max_bitflips, stat);
381 }
378 382
379 /* get back to oob start (end of page) */ 383 /* get back to oob start (end of page) */
380 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 384 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -382,7 +386,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
382 /* read the oob */ 386 /* read the oob */
383 chip->read_buf(mtd, oob, mtd->oobsize); 387 chip->read_buf(mtd, oob, mtd->oobsize);
384 388
385 return 0; 389 return max_bitflips;
386} 390}
387 391
388/* 392/*
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 73abbc3e093e..9f609d2dcf62 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -508,8 +508,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev)
508 this->chip_delay = 30; 508 this->chip_delay = 30;
509 this->ecc.mode = NAND_ECC_SOFT; 509 this->ecc.mode = NAND_ECC_SOFT;
510 510
511 this->options = NAND_NO_AUTOINCR;
512
513 if (pd->devwidth) 511 if (pd->devwidth)
514 this->options |= NAND_BUSWIDTH_16; 512 this->options |= NAND_BUSWIDTH_16;
515 513
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
index a930666d0687..5914bb32e001 100644
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -22,9 +22,9 @@
22 22
23/* ---- Private Function Prototypes -------------------------------------- */ 23/* ---- Private Function Prototypes -------------------------------------- */
24static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, 24static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
25 struct nand_chip *chip, uint8_t *buf, int page); 25 struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
26static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd, 26static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
27 struct nand_chip *chip, const uint8_t *buf); 27 struct nand_chip *chip, const uint8_t *buf, int oob_required);
28 28
29/* ---- Private Variables ------------------------------------------------ */ 29/* ---- Private Variables ------------------------------------------------ */
30 30
@@ -103,11 +103,12 @@ static struct nand_ecclayout nand_hw_eccoob_4096 = {
103* @mtd: mtd info structure 103* @mtd: mtd info structure
104* @chip: nand chip info structure 104* @chip: nand chip info structure
105* @buf: buffer to store read data 105* @buf: buffer to store read data
106* @oob_required: caller expects OOB data read to chip->oob_poi
106* 107*
107***************************************************************************/ 108***************************************************************************/
108static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, 109static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
109 struct nand_chip *chip, uint8_t * buf, 110 struct nand_chip *chip, uint8_t * buf,
110 int page) 111 int oob_required, int page)
111{ 112{
112 int sectorIdx = 0; 113 int sectorIdx = 0;
113 int eccsize = chip->ecc.size; 114 int eccsize = chip->ecc.size;
@@ -116,6 +117,7 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
116 uint8_t eccCalc[NAND_ECC_NUM_BYTES]; 117 uint8_t eccCalc[NAND_ECC_NUM_BYTES];
117 int sectorOobSize = mtd->oobsize / eccsteps; 118 int sectorOobSize = mtd->oobsize / eccsteps;
118 int stat; 119 int stat;
120 unsigned int max_bitflips = 0;
119 121
120 for (sectorIdx = 0; sectorIdx < eccsteps; 122 for (sectorIdx = 0; sectorIdx < eccsteps;
121 sectorIdx++, datap += eccsize) { 123 sectorIdx++, datap += eccsize) {
@@ -177,9 +179,10 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
177 } 179 }
178#endif 180#endif
179 mtd->ecc_stats.corrected += stat; 181 mtd->ecc_stats.corrected += stat;
182 max_bitflips = max_t(unsigned int, max_bitflips, stat);
180 } 183 }
181 } 184 }
182 return 0; 185 return max_bitflips;
183} 186}
184 187
185/**************************************************************************** 188/****************************************************************************
@@ -188,10 +191,11 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
188* @mtd: mtd info structure 191* @mtd: mtd info structure
189* @chip: nand chip info structure 192* @chip: nand chip info structure
190* @buf: data buffer 193* @buf: data buffer
194* @oob_required: must write chip->oob_poi to OOB
191* 195*
192***************************************************************************/ 196***************************************************************************/
193static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd, 197static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
194 struct nand_chip *chip, const uint8_t *buf) 198 struct nand_chip *chip, const uint8_t *buf, int oob_required)
195{ 199{
196 int sectorIdx = 0; 200 int sectorIdx = 0;
197 int eccsize = chip->ecc.size; 201 int eccsize = chip->ecc.size;
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 6908cdde3065..c855e7cd337b 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -341,7 +341,7 @@ static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
341 * for MLC parts which may have permanently stuck bits. 341 * for MLC parts which may have permanently stuck bits.
342 */ 342 */
343 struct nand_chip *chip = mtd->priv; 343 struct nand_chip *chip = mtd->priv;
344 int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0); 344 int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
345 if (ret < 0) 345 if (ret < 0)
346 return -EFAULT; 346 return -EFAULT;
347 else { 347 else {
@@ -476,12 +476,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
476 this->badblock_pattern = &largepage_bbt; 476 this->badblock_pattern = &largepage_bbt;
477 } 477 }
478 478
479 /* 479 this->ecc.strength = 8;
480 * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
481 * conservative guess, given 13 ecc bytes and using bch alg.
482 * (Assume Galois field order m=15 to allow a margin of error.)
483 */
484 this->ecc.strength = 6;
485 480
486#endif 481#endif
487 482
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index d7b86b925de5..3f1c18599cbd 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -558,7 +558,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
558} 558}
559 559
560static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 560static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
561 uint8_t *buf, int page) 561 uint8_t *buf, int oob_required, int page)
562{ 562{
563 bf5xx_nand_read_buf(mtd, buf, mtd->writesize); 563 bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
564 bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); 564 bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -567,7 +567,7 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
567} 567}
568 568
569static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 569static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
570 const uint8_t *buf) 570 const uint8_t *buf, int oob_required)
571{ 571{
572 bf5xx_nand_write_buf(mtd, buf, mtd->writesize); 572 bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
573 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); 573 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2a96e1a12062..41371ba1a811 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -364,25 +364,27 @@ static int cafe_nand_write_oob(struct mtd_info *mtd,
364 364
365/* Don't use -- use nand_read_oob_std for now */ 365/* Don't use -- use nand_read_oob_std for now */
366static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 366static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
367 int page, int sndcmd) 367 int page)
368{ 368{
369 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 369 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
370 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 370 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
371 return 1; 371 return 0;
372} 372}
373/** 373/**
374 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read 374 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
375 * @mtd: mtd info structure 375 * @mtd: mtd info structure
376 * @chip: nand chip info structure 376 * @chip: nand chip info structure
377 * @buf: buffer to store read data 377 * @buf: buffer to store read data
378 * @oob_required: caller expects OOB data read to chip->oob_poi
378 * 379 *
379 * The hw generator calculates the error syndrome automatically. Therefor 380 * The hw generator calculates the error syndrome automatically. Therefor
380 * we need a special oob layout and handling. 381 * we need a special oob layout and handling.
381 */ 382 */
382static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, 383static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
383 uint8_t *buf, int page) 384 uint8_t *buf, int oob_required, int page)
384{ 385{
385 struct cafe_priv *cafe = mtd->priv; 386 struct cafe_priv *cafe = mtd->priv;
387 unsigned int max_bitflips = 0;
386 388
387 cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n", 389 cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
388 cafe_readl(cafe, NAND_ECC_RESULT), 390 cafe_readl(cafe, NAND_ECC_RESULT),
@@ -449,10 +451,11 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
449 } else { 451 } else {
450 dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n); 452 dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
451 mtd->ecc_stats.corrected += n; 453 mtd->ecc_stats.corrected += n;
454 max_bitflips = max_t(unsigned int, max_bitflips, n);
452 } 455 }
453 } 456 }
454 457
455 return 0; 458 return max_bitflips;
456} 459}
457 460
458static struct nand_ecclayout cafe_oobinfo_2048 = { 461static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -518,7 +521,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
518 521
519 522
520static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd, 523static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
521 struct nand_chip *chip, const uint8_t *buf) 524 struct nand_chip *chip,
525 const uint8_t *buf, int oob_required)
522{ 526{
523 struct cafe_priv *cafe = mtd->priv; 527 struct cafe_priv *cafe = mtd->priv;
524 528
@@ -530,16 +534,17 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
530} 534}
531 535
532static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 536static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
533 const uint8_t *buf, int page, int cached, int raw) 537 const uint8_t *buf, int oob_required, int page,
538 int cached, int raw)
534{ 539{
535 int status; 540 int status;
536 541
537 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 542 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
538 543
539 if (unlikely(raw)) 544 if (unlikely(raw))
540 chip->ecc.write_page_raw(mtd, chip, buf); 545 chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
541 else 546 else
542 chip->ecc.write_page(mtd, chip, buf); 547 chip->ecc.write_page(mtd, chip, buf, oob_required);
543 548
544 /* 549 /*
545 * Cached progamming disabled for now, Not sure if its worth the 550 * Cached progamming disabled for now, Not sure if its worth the
@@ -685,7 +690,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
685 690
686 /* Enable the following for a flash based bad block table */ 691 /* Enable the following for a flash based bad block table */
687 cafe->nand.bbt_options = NAND_BBT_USE_FLASH; 692 cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
688 cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; 693 cafe->nand.options = NAND_OWN_BUFFERS;
689 694
690 if (skipbbt) { 695 if (skipbbt) {
691 cafe->nand.options |= NAND_SKIP_BBTSCAN; 696 cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -888,17 +893,7 @@ static struct pci_driver cafe_nand_pci_driver = {
888 .resume = cafe_nand_resume, 893 .resume = cafe_nand_resume,
889}; 894};
890 895
891static int __init cafe_nand_init(void) 896module_pci_driver(cafe_nand_pci_driver);
892{
893 return pci_register_driver(&cafe_nand_pci_driver);
894}
895
896static void __exit cafe_nand_exit(void)
897{
898 pci_unregister_driver(&cafe_nand_pci_driver);
899}
900module_init(cafe_nand_init);
901module_exit(cafe_nand_exit);
902 897
903MODULE_LICENSE("GPL"); 898MODULE_LICENSE("GPL");
904MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); 899MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 821c34c62500..adb6c3ef37fb 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -240,7 +240,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
240 240
241 /* Enable the following for a flash based bad block table */ 241 /* Enable the following for a flash based bad block table */
242 this->bbt_options = NAND_BBT_USE_FLASH; 242 this->bbt_options = NAND_BBT_USE_FLASH;
243 this->options = NAND_NO_AUTOINCR;
244 243
245 /* Scan to find existence of the device */ 244 /* Scan to find existence of the device */
246 if (nand_scan(new_mtd, 1)) { 245 if (nand_scan(new_mtd, 1)) {
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index a9e57d686297..0650aafa0dd2 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -924,9 +924,10 @@ bool is_erased(uint8_t *buf, int len)
924#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) 924#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
925 925
926static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, 926static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
927 uint32_t irq_status) 927 uint32_t irq_status, unsigned int *max_bitflips)
928{ 928{
929 bool check_erased_page = false; 929 bool check_erased_page = false;
930 unsigned int bitflips = 0;
930 931
931 if (irq_status & INTR_STATUS__ECC_ERR) { 932 if (irq_status & INTR_STATUS__ECC_ERR) {
932 /* read the ECC errors. we'll ignore them for now */ 933 /* read the ECC errors. we'll ignore them for now */
@@ -965,6 +966,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
965 /* correct the ECC error */ 966 /* correct the ECC error */
966 buf[offset] ^= err_correction_value; 967 buf[offset] ^= err_correction_value;
967 denali->mtd.ecc_stats.corrected++; 968 denali->mtd.ecc_stats.corrected++;
969 bitflips++;
968 } 970 }
969 } else { 971 } else {
970 /* if the error is not correctable, need to 972 /* if the error is not correctable, need to
@@ -984,6 +986,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
984 clear_interrupts(denali); 986 clear_interrupts(denali);
985 denali_set_intr_modes(denali, true); 987 denali_set_intr_modes(denali, true);
986 } 988 }
989 *max_bitflips = bitflips;
987 return check_erased_page; 990 return check_erased_page;
988} 991}
989 992
@@ -1084,7 +1087,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1084 * by write_page above. 1087 * by write_page above.
1085 * */ 1088 * */
1086static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 1089static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1087 const uint8_t *buf) 1090 const uint8_t *buf, int oob_required)
1088{ 1091{
1089 /* for regular page writes, we let HW handle all the ECC 1092 /* for regular page writes, we let HW handle all the ECC
1090 * data written to the device. */ 1093 * data written to the device. */
@@ -1096,7 +1099,7 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1096 * write_page() function above. 1099 * write_page() function above.
1097 */ 1100 */
1098static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1101static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1099 const uint8_t *buf) 1102 const uint8_t *buf, int oob_required)
1100{ 1103{
1101 /* for raw page writes, we want to disable ECC and simply write 1104 /* for raw page writes, we want to disable ECC and simply write
1102 whatever data is in the buffer. */ 1105 whatever data is in the buffer. */
@@ -1110,17 +1113,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1110} 1113}
1111 1114
1112static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1115static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1113 int page, int sndcmd) 1116 int page)
1114{ 1117{
1115 read_oob_data(mtd, chip->oob_poi, page); 1118 read_oob_data(mtd, chip->oob_poi, page);
1116 1119
1117 return 0; /* notify NAND core to send command to 1120 return 0;
1118 NAND device. */
1119} 1121}
1120 1122
1121static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 1123static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1122 uint8_t *buf, int page) 1124 uint8_t *buf, int oob_required, int page)
1123{ 1125{
1126 unsigned int max_bitflips;
1124 struct denali_nand_info *denali = mtd_to_denali(mtd); 1127 struct denali_nand_info *denali = mtd_to_denali(mtd);
1125 1128
1126 dma_addr_t addr = denali->buf.dma_buf; 1129 dma_addr_t addr = denali->buf.dma_buf;
@@ -1153,7 +1156,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1153 1156
1154 memcpy(buf, denali->buf.buf, mtd->writesize); 1157 memcpy(buf, denali->buf.buf, mtd->writesize);
1155 1158
1156 check_erased_page = handle_ecc(denali, buf, irq_status); 1159 check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
1157 denali_enable_dma(denali, false); 1160 denali_enable_dma(denali, false);
1158 1161
1159 if (check_erased_page) { 1162 if (check_erased_page) {
@@ -1167,11 +1170,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1167 denali->mtd.ecc_stats.failed++; 1170 denali->mtd.ecc_stats.failed++;
1168 } 1171 }
1169 } 1172 }
1170 return 0; 1173 return max_bitflips;
1171} 1174}
1172 1175
1173static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1176static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1174 uint8_t *buf, int page) 1177 uint8_t *buf, int oob_required, int page)
1175{ 1178{
1176 struct denali_nand_info *denali = mtd_to_denali(mtd); 1179 struct denali_nand_info *denali = mtd_to_denali(mtd);
1177 1180
@@ -1702,17 +1705,4 @@ static struct pci_driver denali_pci_driver = {
1702 .remove = denali_pci_remove, 1705 .remove = denali_pci_remove,
1703}; 1706};
1704 1707
1705static int __devinit denali_init(void) 1708module_pci_driver(denali_pci_driver);
1706{
1707 printk(KERN_INFO "Spectra MTD driver\n");
1708 return pci_register_driver(&denali_pci_driver);
1709}
1710
1711/* Free memory */
1712static void __devexit denali_exit(void)
1713{
1714 pci_unregister_driver(&denali_pci_driver);
1715}
1716
1717module_init(denali_init);
1718module_exit(denali_exit);
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index b08202664543..a225e49a5623 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -720,6 +720,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
720 struct docg4_priv *doc = nand->priv; 720 struct docg4_priv *doc = nand->priv;
721 void __iomem *docptr = doc->virtadr; 721 void __iomem *docptr = doc->virtadr;
722 uint16_t status, edc_err, *buf16; 722 uint16_t status, edc_err, *buf16;
723 int bits_corrected = 0;
723 724
724 dev_dbg(doc->dev, "%s: page %08x\n", __func__, page); 725 dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
725 726
@@ -772,7 +773,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
772 773
773 /* If bitflips are reported, attempt to correct with ecc */ 774 /* If bitflips are reported, attempt to correct with ecc */
774 if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) { 775 if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
775 int bits_corrected = correct_data(mtd, buf, page); 776 bits_corrected = correct_data(mtd, buf, page);
776 if (bits_corrected == -EBADMSG) 777 if (bits_corrected == -EBADMSG)
777 mtd->ecc_stats.failed++; 778 mtd->ecc_stats.failed++;
778 else 779 else
@@ -781,24 +782,24 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
781 } 782 }
782 783
783 writew(0, docptr + DOC_DATAEND); 784 writew(0, docptr + DOC_DATAEND);
784 return 0; 785 return bits_corrected;
785} 786}
786 787
787 788
788static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand, 789static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
789 uint8_t *buf, int page) 790 uint8_t *buf, int oob_required, int page)
790{ 791{
791 return read_page(mtd, nand, buf, page, false); 792 return read_page(mtd, nand, buf, page, false);
792} 793}
793 794
794static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand, 795static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
795 uint8_t *buf, int page) 796 uint8_t *buf, int oob_required, int page)
796{ 797{
797 return read_page(mtd, nand, buf, page, true); 798 return read_page(mtd, nand, buf, page, true);
798} 799}
799 800
800static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand, 801static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
801 int page, int sndcmd) 802 int page)
802{ 803{
803 struct docg4_priv *doc = nand->priv; 804 struct docg4_priv *doc = nand->priv;
804 void __iomem *docptr = doc->virtadr; 805 void __iomem *docptr = doc->virtadr;
@@ -952,13 +953,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
952} 953}
953 954
954static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, 955static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
955 const uint8_t *buf) 956 const uint8_t *buf, int oob_required)
956{ 957{
957 return write_page(mtd, nand, buf, false); 958 return write_page(mtd, nand, buf, false);
958} 959}
959 960
960static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, 961static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
961 const uint8_t *buf) 962 const uint8_t *buf, int oob_required)
962{ 963{
963 return write_page(mtd, nand, buf, true); 964 return write_page(mtd, nand, buf, true);
964} 965}
@@ -1002,7 +1003,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
1002 return -ENOMEM; 1003 return -ENOMEM;
1003 1004
1004 read_page_prologue(mtd, g4_addr); 1005 read_page_prologue(mtd, g4_addr);
1005 status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE); 1006 status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
1006 if (status) 1007 if (status)
1007 goto exit; 1008 goto exit;
1008 1009
@@ -1079,7 +1080,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
1079 1080
1080 /* write first page of block */ 1081 /* write first page of block */
1081 write_page_prologue(mtd, g4_addr); 1082 write_page_prologue(mtd, g4_addr);
1082 docg4_write_page(mtd, nand, buf); 1083 docg4_write_page(mtd, nand, buf, 1);
1083 ret = pageprog(mtd); 1084 ret = pageprog(mtd);
1084 if (!ret) 1085 if (!ret)
1085 mtd->ecc_stats.badblocks++; 1086 mtd->ecc_stats.badblocks++;
@@ -1192,8 +1193,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
1192 nand->ecc.prepad = 8; 1193 nand->ecc.prepad = 8;
1193 nand->ecc.bytes = 8; 1194 nand->ecc.bytes = 8;
1194 nand->ecc.strength = DOCG4_T; 1195 nand->ecc.strength = DOCG4_T;
1195 nand->options = 1196 nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
1196 NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
1197 nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA; 1197 nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
1198 nand->controller = &nand->hwcontrol; 1198 nand->controller = &nand->hwcontrol;
1199 spin_lock_init(&nand->controller->lock); 1199 spin_lock_init(&nand->controller->lock);
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 80b5264f0a32..784293806110 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,6 +75,7 @@ struct fsl_elbc_fcm_ctrl {
75 unsigned int use_mdr; /* Non zero if the MDR is to be set */ 75 unsigned int use_mdr; /* Non zero if the MDR is to be set */
76 unsigned int oob; /* Non zero if operating on OOB data */ 76 unsigned int oob; /* Non zero if operating on OOB data */
77 unsigned int counter; /* counter for the initializations */ 77 unsigned int counter; /* counter for the initializations */
78 unsigned int max_bitflips; /* Saved during READ0 cmd */
78}; 79};
79 80
80/* These map to the positions used by the FCM hardware ECC generator */ 81/* These map to the positions used by the FCM hardware ECC generator */
@@ -253,6 +254,8 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
253 if (chip->ecc.mode != NAND_ECC_HW) 254 if (chip->ecc.mode != NAND_ECC_HW)
254 return 0; 255 return 0;
255 256
257 elbc_fcm_ctrl->max_bitflips = 0;
258
256 if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) { 259 if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
257 uint32_t lteccr = in_be32(&lbc->lteccr); 260 uint32_t lteccr = in_be32(&lbc->lteccr);
258 /* 261 /*
@@ -262,11 +265,16 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
262 * bits 28-31 are uncorrectable errors, marked elsewhere. 265 * bits 28-31 are uncorrectable errors, marked elsewhere.
263 * for small page nand only 1 bit is used. 266 * for small page nand only 1 bit is used.
264 * if the ELBC doesn't have the lteccr register it reads 0 267 * if the ELBC doesn't have the lteccr register it reads 0
268 * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
269 * count the number of sub-pages with bitflips and update
270 * ecc_stats.corrected accordingly.
265 */ 271 */
266 if (lteccr & 0x000F000F) 272 if (lteccr & 0x000F000F)
267 out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */ 273 out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
268 if (lteccr & 0x000F0000) 274 if (lteccr & 0x000F0000) {
269 mtd->ecc_stats.corrected++; 275 mtd->ecc_stats.corrected++;
276 elbc_fcm_ctrl->max_bitflips = 1;
277 }
270 } 278 }
271 279
272 return 0; 280 return 0;
@@ -738,26 +746,28 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
738 return 0; 746 return 0;
739} 747}
740 748
741static int fsl_elbc_read_page(struct mtd_info *mtd, 749static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
742 struct nand_chip *chip, 750 uint8_t *buf, int oob_required, int page)
743 uint8_t *buf,
744 int page)
745{ 751{
752 struct fsl_elbc_mtd *priv = chip->priv;
753 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
754 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
755
746 fsl_elbc_read_buf(mtd, buf, mtd->writesize); 756 fsl_elbc_read_buf(mtd, buf, mtd->writesize);
747 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 757 if (oob_required)
758 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
748 759
749 if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL) 760 if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
750 mtd->ecc_stats.failed++; 761 mtd->ecc_stats.failed++;
751 762
752 return 0; 763 return elbc_fcm_ctrl->max_bitflips;
753} 764}
754 765
755/* ECC will be calculated automatically, and errors will be detected in 766/* ECC will be calculated automatically, and errors will be detected in
756 * waitfunc. 767 * waitfunc.
757 */ 768 */
758static void fsl_elbc_write_page(struct mtd_info *mtd, 769static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
759 struct nand_chip *chip, 770 const uint8_t *buf, int oob_required)
760 const uint8_t *buf)
761{ 771{
762 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 772 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
763 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 773 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -795,7 +805,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
795 chip->bbt_md = &bbt_mirror_descr; 805 chip->bbt_md = &bbt_mirror_descr;
796 806
797 /* set up nand options */ 807 /* set up nand options */
798 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; 808 chip->options = NAND_NO_READRDY;
799 chip->bbt_options = NAND_BBT_USE_FLASH; 809 chip->bbt_options = NAND_BBT_USE_FLASH;
800 810
801 chip->controller = &elbc_fcm_ctrl->controller; 811 chip->controller = &elbc_fcm_ctrl->controller;
@@ -814,11 +824,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
814 chip->ecc.size = 512; 824 chip->ecc.size = 512;
815 chip->ecc.bytes = 3; 825 chip->ecc.bytes = 3;
816 chip->ecc.strength = 1; 826 chip->ecc.strength = 1;
817 /*
818 * FIXME: can hardware ecc correct 4 bitflips if page size is
819 * 2k? Then does hardware report number of corrections for this
820 * case? If so, ecc_stats reporting needs to be fixed as well.
821 */
822 } else { 827 } else {
823 /* otherwise fall back to default software ECC */ 828 /* otherwise fall back to default software ECC */
824 chip->ecc.mode = NAND_ECC_SOFT; 829 chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index c30ac7b83d28..9602c1b7e27e 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -63,6 +63,7 @@ struct fsl_ifc_nand_ctrl {
63 unsigned int oob; /* Non zero if operating on OOB data */ 63 unsigned int oob; /* Non zero if operating on OOB data */
64 unsigned int eccread; /* Non zero for a full-page ECC read */ 64 unsigned int eccread; /* Non zero for a full-page ECC read */
65 unsigned int counter; /* counter for the initializations */ 65 unsigned int counter; /* counter for the initializations */
66 unsigned int max_bitflips; /* Saved during READ0 cmd */
66}; 67};
67 68
68static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl; 69static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
@@ -262,6 +263,8 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
262 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER) 263 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
263 dev_err(priv->dev, "NAND Flash Write Protect Error\n"); 264 dev_err(priv->dev, "NAND Flash Write Protect Error\n");
264 265
266 nctrl->max_bitflips = 0;
267
265 if (nctrl->eccread) { 268 if (nctrl->eccread) {
266 int errors; 269 int errors;
267 int bufnum = nctrl->page & priv->bufnum_mask; 270 int bufnum = nctrl->page & priv->bufnum_mask;
@@ -290,6 +293,9 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
290 } 293 }
291 294
292 mtd->ecc_stats.corrected += errors; 295 mtd->ecc_stats.corrected += errors;
296 nctrl->max_bitflips = max_t(unsigned int,
297 nctrl->max_bitflips,
298 errors);
293 } 299 }
294 300
295 nctrl->eccread = 0; 301 nctrl->eccread = 0;
@@ -375,21 +381,31 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
375 381
376 return; 382 return;
377 383
378 /* READID must read all 8 possible bytes */
379 case NAND_CMD_READID: 384 case NAND_CMD_READID:
385 case NAND_CMD_PARAM: {
386 int timing = IFC_FIR_OP_RB;
387 if (command == NAND_CMD_PARAM)
388 timing = IFC_FIR_OP_RBCD;
389
380 out_be32(&ifc->ifc_nand.nand_fir0, 390 out_be32(&ifc->ifc_nand.nand_fir0,
381 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) | 391 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
382 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 392 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
383 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); 393 (timing << IFC_NAND_FIR0_OP2_SHIFT));
384 out_be32(&ifc->ifc_nand.nand_fcr0, 394 out_be32(&ifc->ifc_nand.nand_fcr0,
385 NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT); 395 command << IFC_NAND_FCR0_CMD0_SHIFT);
386 /* 8 bytes for manuf, device and exts */ 396 out_be32(&ifc->ifc_nand.row3, column);
387 out_be32(&ifc->ifc_nand.nand_fbcr, 8); 397
388 ifc_nand_ctrl->read_bytes = 8; 398 /*
399 * although currently it's 8 bytes for READID, we always read
400 * the maximum 256 bytes(for PARAM)
401 */
402 out_be32(&ifc->ifc_nand.nand_fbcr, 256);
403 ifc_nand_ctrl->read_bytes = 256;
389 404
390 set_addr(mtd, 0, 0, 0); 405 set_addr(mtd, 0, 0, 0);
391 fsl_ifc_run_command(mtd); 406 fsl_ifc_run_command(mtd);
392 return; 407 return;
408 }
393 409
394 /* ERASE1 stores the block and page address */ 410 /* ERASE1 stores the block and page address */
395 case NAND_CMD_ERASE1: 411 case NAND_CMD_ERASE1:
@@ -682,15 +698,16 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
682 return nand_fsr | NAND_STATUS_WP; 698 return nand_fsr | NAND_STATUS_WP;
683} 699}
684 700
685static int fsl_ifc_read_page(struct mtd_info *mtd, 701static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
686 struct nand_chip *chip, 702 uint8_t *buf, int oob_required, int page)
687 uint8_t *buf, int page)
688{ 703{
689 struct fsl_ifc_mtd *priv = chip->priv; 704 struct fsl_ifc_mtd *priv = chip->priv;
690 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 705 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
706 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
691 707
692 fsl_ifc_read_buf(mtd, buf, mtd->writesize); 708 fsl_ifc_read_buf(mtd, buf, mtd->writesize);
693 fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 709 if (oob_required)
710 fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
694 711
695 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) 712 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
696 dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n"); 713 dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
@@ -698,15 +715,14 @@ static int fsl_ifc_read_page(struct mtd_info *mtd,
698 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) 715 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
699 mtd->ecc_stats.failed++; 716 mtd->ecc_stats.failed++;
700 717
701 return 0; 718 return nctrl->max_bitflips;
702} 719}
703 720
704/* ECC will be calculated automatically, and errors will be detected in 721/* ECC will be calculated automatically, and errors will be detected in
705 * waitfunc. 722 * waitfunc.
706 */ 723 */
707static void fsl_ifc_write_page(struct mtd_info *mtd, 724static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
708 struct nand_chip *chip, 725 const uint8_t *buf, int oob_required)
709 const uint8_t *buf)
710{ 726{
711 fsl_ifc_write_buf(mtd, buf, mtd->writesize); 727 fsl_ifc_write_buf(mtd, buf, mtd->writesize);
712 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 728 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -789,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
789 out_be32(&ifc->ifc_nand.ncfgr, 0x0); 805 out_be32(&ifc->ifc_nand.ncfgr, 0x0);
790 806
791 /* set up nand options */ 807 /* set up nand options */
792 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; 808 chip->options = NAND_NO_READRDY;
793 chip->bbt_options = NAND_BBT_USE_FLASH; 809 chip->bbt_options = NAND_BBT_USE_FLASH;
794 810
795 811
@@ -811,6 +827,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
811 /* Hardware generates ECC per 512 Bytes */ 827 /* Hardware generates ECC per 512 Bytes */
812 chip->ecc.size = 512; 828 chip->ecc.size = 512;
813 chip->ecc.bytes = 8; 829 chip->ecc.bytes = 8;
830 chip->ecc.strength = 4;
814 831
815 switch (csor & CSOR_NAND_PGS_MASK) { 832 switch (csor & CSOR_NAND_PGS_MASK) {
816 case CSOR_NAND_PGS_512: 833 case CSOR_NAND_PGS_512:
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 1b8330e1155a..38d26240d8b1 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -692,6 +692,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
692 * @mtd: mtd info structure 692 * @mtd: mtd info structure
693 * @chip: nand chip info structure 693 * @chip: nand chip info structure
694 * @buf: buffer to store read data 694 * @buf: buffer to store read data
695 * @oob_required: caller expects OOB data read to chip->oob_poi
695 * @page: page number to read 696 * @page: page number to read
696 * 697 *
697 * This routine is needed for fsmc version 8 as reading from NAND chip has to be 698 * This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -701,7 +702,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
701 * max of 8 bits) 702 * max of 8 bits)
702 */ 703 */
703static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 704static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
704 uint8_t *buf, int page) 705 uint8_t *buf, int oob_required, int page)
705{ 706{
706 struct fsmc_nand_data *host = container_of(mtd, 707 struct fsmc_nand_data *host = container_of(mtd,
707 struct fsmc_nand_data, mtd); 708 struct fsmc_nand_data, mtd);
@@ -720,6 +721,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
720 */ 721 */
721 uint16_t ecc_oob[7]; 722 uint16_t ecc_oob[7];
722 uint8_t *oob = (uint8_t *)&ecc_oob[0]; 723 uint8_t *oob = (uint8_t *)&ecc_oob[0];
724 unsigned int max_bitflips = 0;
723 725
724 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { 726 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
725 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); 727 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
@@ -748,13 +750,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
748 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 750 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
749 751
750 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 752 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
751 if (stat < 0) 753 if (stat < 0) {
752 mtd->ecc_stats.failed++; 754 mtd->ecc_stats.failed++;
753 else 755 } else {
754 mtd->ecc_stats.corrected += stat; 756 mtd->ecc_stats.corrected += stat;
757 max_bitflips = max_t(unsigned int, max_bitflips, stat);
758 }
755 } 759 }
756 760
757 return 0; 761 return max_bitflips;
758} 762}
759 763
760/* 764/*
@@ -994,9 +998,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
994 return PTR_ERR(host->clk); 998 return PTR_ERR(host->clk);
995 } 999 }
996 1000
997 ret = clk_enable(host->clk); 1001 ret = clk_prepare_enable(host->clk);
998 if (ret) 1002 if (ret)
999 goto err_clk_enable; 1003 goto err_clk_prepare_enable;
1000 1004
1001 /* 1005 /*
1002 * This device ID is actually a common AMBA ID as used on the 1006 * This device ID is actually a common AMBA ID as used on the
@@ -1176,8 +1180,8 @@ err_req_write_chnl:
1176 if (host->mode == USE_DMA_ACCESS) 1180 if (host->mode == USE_DMA_ACCESS)
1177 dma_release_channel(host->read_dma_chan); 1181 dma_release_channel(host->read_dma_chan);
1178err_req_read_chnl: 1182err_req_read_chnl:
1179 clk_disable(host->clk); 1183 clk_disable_unprepare(host->clk);
1180err_clk_enable: 1184err_clk_prepare_enable:
1181 clk_put(host->clk); 1185 clk_put(host->clk);
1182 return ret; 1186 return ret;
1183} 1187}
@@ -1198,7 +1202,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
1198 dma_release_channel(host->write_dma_chan); 1202 dma_release_channel(host->write_dma_chan);
1199 dma_release_channel(host->read_dma_chan); 1203 dma_release_channel(host->read_dma_chan);
1200 } 1204 }
1201 clk_disable(host->clk); 1205 clk_disable_unprepare(host->clk);
1202 clk_put(host->clk); 1206 clk_put(host->clk);
1203 } 1207 }
1204 1208
@@ -1210,7 +1214,7 @@ static int fsmc_nand_suspend(struct device *dev)
1210{ 1214{
1211 struct fsmc_nand_data *host = dev_get_drvdata(dev); 1215 struct fsmc_nand_data *host = dev_get_drvdata(dev);
1212 if (host) 1216 if (host)
1213 clk_disable(host->clk); 1217 clk_disable_unprepare(host->clk);
1214 return 0; 1218 return 0;
1215} 1219}
1216 1220
@@ -1218,7 +1222,7 @@ static int fsmc_nand_resume(struct device *dev)
1218{ 1222{
1219 struct fsmc_nand_data *host = dev_get_drvdata(dev); 1223 struct fsmc_nand_data *host = dev_get_drvdata(dev);
1220 if (host) { 1224 if (host) {
1221 clk_enable(host->clk); 1225 clk_prepare_enable(host->clk);
1222 fsmc_nand_setup(host->regs_va, host->bank, 1226 fsmc_nand_setup(host->regs_va, host->bank,
1223 host->nand.options & NAND_BUSWIDTH_16, 1227 host->nand.options & NAND_BUSWIDTH_16,
1224 host->dev_timings); 1228 host->dev_timings);
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index 4effb8c579db..a0924515c396 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -51,15 +51,26 @@
51 51
52#define BP_BCH_FLASH0LAYOUT0_ECC0 12 52#define BP_BCH_FLASH0LAYOUT0_ECC0 12
53#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0) 53#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
54#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \ 54#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
55 (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0) 55#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
56#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
57 (GPMI_IS_MX6Q(x) \
58 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
59 & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
60 : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
61 & BM_BCH_FLASH0LAYOUT0_ECC0) \
62 )
56 63
57#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0 64#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
58#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ 65#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
59 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) 66 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
60#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \ 67#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
61 (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\ 68 (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
62 & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) 69#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
70 (GPMI_IS_MX6Q(x) \
71 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
72 : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
73 )
63 74
64#define HW_BCH_FLASH0LAYOUT1 0x00000090 75#define HW_BCH_FLASH0LAYOUT1 0x00000090
65 76
@@ -72,13 +83,24 @@
72 83
73#define BP_BCH_FLASH0LAYOUT1_ECCN 12 84#define BP_BCH_FLASH0LAYOUT1_ECCN 12
74#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN) 85#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
75#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \ 86#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
76 (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN) 87#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
88#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
89 (GPMI_IS_MX6Q(x) \
90 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
91 & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
92 : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
93 & BM_BCH_FLASH0LAYOUT1_ECCN) \
94 )
77 95
78#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0 96#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
79#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ 97#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
80 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) 98 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
81#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \ 99#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
82 (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 100 (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
83 & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) 101#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
102 (GPMI_IS_MX6Q(x) \
103 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
104 : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
105 )
84#endif 106#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index e8ea7107932e..a1f43329ad43 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -21,7 +21,6 @@
21#include <linux/mtd/gpmi-nand.h> 21#include <linux/mtd/gpmi-nand.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <mach/mxs.h>
25 24
26#include "gpmi-nand.h" 25#include "gpmi-nand.h"
27#include "gpmi-regs.h" 26#include "gpmi-regs.h"
@@ -37,6 +36,8 @@ struct timing_threshod timing_default_threshold = {
37 .max_dll_delay_in_ns = 16, 36 .max_dll_delay_in_ns = 16,
38}; 37};
39 38
39#define MXS_SET_ADDR 0x4
40#define MXS_CLR_ADDR 0x8
40/* 41/*
41 * Clear the bit and poll it cleared. This is usually called with 42 * Clear the bit and poll it cleared. This is usually called with
42 * a reset address and mask being either SFTRST(bit 31) or CLKGATE 43 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
@@ -47,7 +48,7 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
47 int timeout = 0x400; 48 int timeout = 0x400;
48 49
49 /* clear the bit */ 50 /* clear the bit */
50 __mxs_clrl(mask, addr); 51 writel(mask, addr + MXS_CLR_ADDR);
51 52
52 /* 53 /*
53 * SFTRST needs 3 GPMI clocks to settle, the reference manual 54 * SFTRST needs 3 GPMI clocks to settle, the reference manual
@@ -92,11 +93,11 @@ static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
92 goto error; 93 goto error;
93 94
94 /* clear CLKGATE */ 95 /* clear CLKGATE */
95 __mxs_clrl(MODULE_CLKGATE, reset_addr); 96 writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
96 97
97 if (!just_enable) { 98 if (!just_enable) {
98 /* set SFTRST to reset the block */ 99 /* set SFTRST to reset the block */
99 __mxs_setl(MODULE_SFTRST, reset_addr); 100 writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
100 udelay(1); 101 udelay(1);
101 102
102 /* poll CLKGATE becoming set */ 103 /* poll CLKGATE becoming set */
@@ -223,13 +224,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
223 /* Configure layout 0. */ 224 /* Configure layout 0. */
224 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) 225 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
225 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) 226 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
226 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength) 227 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
227 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size), 228 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
228 r->bch_regs + HW_BCH_FLASH0LAYOUT0); 229 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
229 230
230 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) 231 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
231 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength) 232 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
232 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size), 233 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
233 r->bch_regs + HW_BCH_FLASH0LAYOUT1); 234 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
234 235
235 /* Set *all* chip selects to use layout 0. */ 236 /* Set *all* chip selects to use layout 0. */
@@ -255,11 +256,12 @@ static unsigned int ns_to_cycles(unsigned int time,
255 return max(k, min); 256 return max(k, min);
256} 257}
257 258
259#define DEF_MIN_PROP_DELAY 5
260#define DEF_MAX_PROP_DELAY 9
258/* Apply timing to current hardware conditions. */ 261/* Apply timing to current hardware conditions. */
259static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this, 262static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
260 struct gpmi_nfc_hardware_timing *hw) 263 struct gpmi_nfc_hardware_timing *hw)
261{ 264{
262 struct gpmi_nand_platform_data *pdata = this->pdata;
263 struct timing_threshod *nfc = &timing_default_threshold; 265 struct timing_threshod *nfc = &timing_default_threshold;
264 struct nand_chip *nand = &this->nand; 266 struct nand_chip *nand = &this->nand;
265 struct nand_timing target = this->timing; 267 struct nand_timing target = this->timing;
@@ -276,8 +278,8 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
276 int ideal_sample_delay_in_ns; 278 int ideal_sample_delay_in_ns;
277 unsigned int sample_delay_factor; 279 unsigned int sample_delay_factor;
278 int tEYE; 280 int tEYE;
279 unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns; 281 unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
280 unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns; 282 unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
281 283
282 /* 284 /*
283 * If there are multiple chips, we need to relax the timings to allow 285 * If there are multiple chips, we need to relax the timings to allow
@@ -803,7 +805,8 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
803 if (GPMI_IS_MX23(this)) { 805 if (GPMI_IS_MX23(this)) {
804 mask = MX23_BM_GPMI_DEBUG_READY0 << chip; 806 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
805 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); 807 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
806 } else if (GPMI_IS_MX28(this)) { 808 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
809 /* MX28 shares the same R/B register as MX6Q. */
807 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); 810 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
808 reg = readl(r->gpmi_regs + HW_GPMI_STAT); 811 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
809 } else 812 } else
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index b68e04310bd8..a05b7b444d4f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,6 +25,8 @@
25#include <linux/mtd/gpmi-nand.h> 25#include <linux/mtd/gpmi-nand.h>
26#include <linux/mtd/partitions.h> 26#include <linux/mtd/partitions.h>
27#include <linux/pinctrl/consumer.h> 27#include <linux/pinctrl/consumer.h>
28#include <linux/of.h>
29#include <linux/of_device.h>
28#include "gpmi-nand.h" 30#include "gpmi-nand.h"
29 31
30/* add our owner bbt descriptor */ 32/* add our owner bbt descriptor */
@@ -387,7 +389,7 @@ static void release_bch_irq(struct gpmi_nand_data *this)
387static bool gpmi_dma_filter(struct dma_chan *chan, void *param) 389static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
388{ 390{
389 struct gpmi_nand_data *this = param; 391 struct gpmi_nand_data *this = param;
390 struct resource *r = this->private; 392 int dma_channel = (int)this->private;
391 393
392 if (!mxs_dma_is_apbh(chan)) 394 if (!mxs_dma_is_apbh(chan))
393 return false; 395 return false;
@@ -399,7 +401,7 @@ static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
399 * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7 401 * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
400 * (These eight channels share the same IRQ!) 402 * (These eight channels share the same IRQ!)
401 */ 403 */
402 if (r->start <= chan->chan_id && chan->chan_id <= r->end) { 404 if (dma_channel == chan->chan_id) {
403 chan->private = &this->dma_data; 405 chan->private = &this->dma_data;
404 return true; 406 return true;
405 } 407 }
@@ -419,57 +421,45 @@ static void release_dma_channels(struct gpmi_nand_data *this)
419static int __devinit acquire_dma_channels(struct gpmi_nand_data *this) 421static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
420{ 422{
421 struct platform_device *pdev = this->pdev; 423 struct platform_device *pdev = this->pdev;
422 struct gpmi_nand_platform_data *pdata = this->pdata; 424 struct resource *r_dma;
423 struct resources *res = &this->resources; 425 struct device_node *dn;
424 struct resource *r, *r_dma; 426 int dma_channel;
425 unsigned int i; 427 unsigned int ret;
428 struct dma_chan *dma_chan;
429 dma_cap_mask_t mask;
430
431 /* dma channel, we only use the first one. */
432 dn = pdev->dev.of_node;
433 ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
434 if (ret) {
435 pr_err("unable to get DMA channel from dt.\n");
436 goto acquire_err;
437 }
438 this->private = (void *)dma_channel;
426 439
427 r = platform_get_resource_byname(pdev, IORESOURCE_DMA, 440 /* gpmi dma interrupt */
428 GPMI_NAND_DMA_CHANNELS_RES_NAME);
429 r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ, 441 r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
430 GPMI_NAND_DMA_INTERRUPT_RES_NAME); 442 GPMI_NAND_DMA_INTERRUPT_RES_NAME);
431 if (!r || !r_dma) { 443 if (!r_dma) {
432 pr_err("Can't get resource for DMA\n"); 444 pr_err("Can't get resource for DMA\n");
433 return -ENXIO; 445 goto acquire_err;
434 } 446 }
447 this->dma_data.chan_irq = r_dma->start;
435 448
436 /* used in gpmi_dma_filter() */ 449 /* request dma channel */
437 this->private = r; 450 dma_cap_zero(mask);
438 451 dma_cap_set(DMA_SLAVE, mask);
439 for (i = r->start; i <= r->end; i++) {
440 struct dma_chan *dma_chan;
441 dma_cap_mask_t mask;
442 452
443 if (i - r->start >= pdata->max_chip_count) 453 dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
444 break; 454 if (!dma_chan) {
445 455 pr_err("dma_request_channel failed.\n");
446 dma_cap_zero(mask); 456 goto acquire_err;
447 dma_cap_set(DMA_SLAVE, mask);
448
449 /* get the DMA interrupt */
450 if (r_dma->start == r_dma->end) {
451 /* only register the first. */
452 if (i == r->start)
453 this->dma_data.chan_irq = r_dma->start;
454 else
455 this->dma_data.chan_irq = NO_IRQ;
456 } else
457 this->dma_data.chan_irq = r_dma->start + (i - r->start);
458
459 dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
460 if (!dma_chan)
461 goto acquire_err;
462
463 /* fill the first empty item */
464 this->dma_chans[i - r->start] = dma_chan;
465 } 457 }
466 458
467 res->dma_low_channel = r->start; 459 this->dma_chans[0] = dma_chan;
468 res->dma_high_channel = i;
469 return 0; 460 return 0;
470 461
471acquire_err: 462acquire_err:
472 pr_err("Can't acquire DMA channel %u\n", i);
473 release_dma_channels(this); 463 release_dma_channels(this);
474 return -EINVAL; 464 return -EINVAL;
475} 465}
@@ -851,7 +841,7 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
851} 841}
852 842
853static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, 843static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
854 uint8_t *buf, int page) 844 uint8_t *buf, int oob_required, int page)
855{ 845{
856 struct gpmi_nand_data *this = chip->priv; 846 struct gpmi_nand_data *this = chip->priv;
857 struct bch_geometry *nfc_geo = &this->bch_geometry; 847 struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -917,28 +907,31 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
917 mtd->ecc_stats.corrected += corrected; 907 mtd->ecc_stats.corrected += corrected;
918 } 908 }
919 909
920 /* 910 if (oob_required) {
921 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for 911 /*
922 * details about our policy for delivering the OOB. 912 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
923 * 913 * for details about our policy for delivering the OOB.
924 * We fill the caller's buffer with set bits, and then copy the block 914 *
925 * mark to th caller's buffer. Note that, if block mark swapping was 915 * We fill the caller's buffer with set bits, and then copy the
926 * necessary, it has already been done, so we can rely on the first 916 * block mark to th caller's buffer. Note that, if block mark
927 * byte of the auxiliary buffer to contain the block mark. 917 * swapping was necessary, it has already been done, so we can
928 */ 918 * rely on the first byte of the auxiliary buffer to contain
929 memset(chip->oob_poi, ~0, mtd->oobsize); 919 * the block mark.
930 chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; 920 */
921 memset(chip->oob_poi, ~0, mtd->oobsize);
922 chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
931 923
932 read_page_swap_end(this, buf, mtd->writesize, 924 read_page_swap_end(this, buf, mtd->writesize,
933 this->payload_virt, this->payload_phys, 925 this->payload_virt, this->payload_phys,
934 nfc_geo->payload_size, 926 nfc_geo->payload_size,
935 payload_virt, payload_phys); 927 payload_virt, payload_phys);
928 }
936exit_nfc: 929exit_nfc:
937 return ret; 930 return ret;
938} 931}
939 932
940static void gpmi_ecc_write_page(struct mtd_info *mtd, 933static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
941 struct nand_chip *chip, const uint8_t *buf) 934 const uint8_t *buf, int oob_required)
942{ 935{
943 struct gpmi_nand_data *this = chip->priv; 936 struct gpmi_nand_data *this = chip->priv;
944 struct bch_geometry *nfc_geo = &this->bch_geometry; 937 struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1077,7 +1070,7 @@ exit_auxiliary:
1077 * this driver. 1070 * this driver.
1078 */ 1071 */
1079static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1072static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1080 int page, int sndcmd) 1073 int page)
1081{ 1074{
1082 struct gpmi_nand_data *this = chip->priv; 1075 struct gpmi_nand_data *this = chip->priv;
1083 1076
@@ -1100,11 +1093,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1100 chip->oob_poi[0] = chip->read_byte(mtd); 1093 chip->oob_poi[0] = chip->read_byte(mtd);
1101 } 1094 }
1102 1095
1103 /* 1096 return 0;
1104 * Return true, indicating that the next call to this function must send
1105 * a command.
1106 */
1107 return true;
1108} 1097}
1109 1098
1110static int 1099static int
@@ -1318,7 +1307,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1318 /* Write the first page of the current stride. */ 1307 /* Write the first page of the current stride. */
1319 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); 1308 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
1320 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 1309 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1321 chip->ecc.write_page_raw(mtd, chip, buffer); 1310 chip->ecc.write_page_raw(mtd, chip, buffer, 0);
1322 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1311 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1323 1312
1324 /* Wait for the write to finish. */ 1313 /* Wait for the write to finish. */
@@ -1444,6 +1433,10 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
1444 if (ret) 1433 if (ret)
1445 return ret; 1434 return ret;
1446 1435
1436 /* Adjust the ECC strength according to the chip. */
1437 this->nand.ecc.strength = this->bch_geometry.ecc_strength;
1438 this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
1439
1447 /* NAND boot init, depends on the gpmi_set_geometry(). */ 1440 /* NAND boot init, depends on the gpmi_set_geometry(). */
1448 return nand_boot_init(this); 1441 return nand_boot_init(this);
1449} 1442}
@@ -1471,9 +1464,9 @@ void gpmi_nfc_exit(struct gpmi_nand_data *this)
1471 1464
1472static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) 1465static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
1473{ 1466{
1474 struct gpmi_nand_platform_data *pdata = this->pdata;
1475 struct mtd_info *mtd = &this->mtd; 1467 struct mtd_info *mtd = &this->mtd;
1476 struct nand_chip *chip = &this->nand; 1468 struct nand_chip *chip = &this->nand;
1469 struct mtd_part_parser_data ppdata = {};
1477 int ret; 1470 int ret;
1478 1471
1479 /* init current chip */ 1472 /* init current chip */
@@ -1502,6 +1495,7 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
1502 chip->options |= NAND_NO_SUBPAGE_WRITE; 1495 chip->options |= NAND_NO_SUBPAGE_WRITE;
1503 chip->ecc.mode = NAND_ECC_HW; 1496 chip->ecc.mode = NAND_ECC_HW;
1504 chip->ecc.size = 1; 1497 chip->ecc.size = 1;
1498 chip->ecc.strength = 8;
1505 chip->ecc.layout = &gpmi_hw_ecclayout; 1499 chip->ecc.layout = &gpmi_hw_ecclayout;
1506 1500
1507 /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ 1501 /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
@@ -1511,14 +1505,14 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
1511 if (ret) 1505 if (ret)
1512 goto err_out; 1506 goto err_out;
1513 1507
1514 ret = nand_scan(mtd, pdata->max_chip_count); 1508 ret = nand_scan(mtd, 1);
1515 if (ret) { 1509 if (ret) {
1516 pr_err("Chip scan failed\n"); 1510 pr_err("Chip scan failed\n");
1517 goto err_out; 1511 goto err_out;
1518 } 1512 }
1519 1513
1520 ret = mtd_device_parse_register(mtd, NULL, NULL, 1514 ppdata.of_node = this->pdev->dev.of_node;
1521 pdata->partitions, pdata->partition_count); 1515 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
1522 if (ret) 1516 if (ret)
1523 goto err_out; 1517 goto err_out;
1524 return 0; 1518 return 0;
@@ -1528,12 +1522,41 @@ err_out:
1528 return ret; 1522 return ret;
1529} 1523}
1530 1524
1525static const struct platform_device_id gpmi_ids[] = {
1526 { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
1527 { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
1528 { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
1529 {},
1530};
1531
1532static const struct of_device_id gpmi_nand_id_table[] = {
1533 {
1534 .compatible = "fsl,imx23-gpmi-nand",
1535 .data = (void *)&gpmi_ids[IS_MX23]
1536 }, {
1537 .compatible = "fsl,imx28-gpmi-nand",
1538 .data = (void *)&gpmi_ids[IS_MX28]
1539 }, {
1540 .compatible = "fsl,imx6q-gpmi-nand",
1541 .data = (void *)&gpmi_ids[IS_MX6Q]
1542 }, {}
1543};
1544MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
1545
1531static int __devinit gpmi_nand_probe(struct platform_device *pdev) 1546static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1532{ 1547{
1533 struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
1534 struct gpmi_nand_data *this; 1548 struct gpmi_nand_data *this;
1549 const struct of_device_id *of_id;
1535 int ret; 1550 int ret;
1536 1551
1552 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
1553 if (of_id) {
1554 pdev->id_entry = of_id->data;
1555 } else {
1556 pr_err("Failed to find the right device id.\n");
1557 return -ENOMEM;
1558 }
1559
1537 this = kzalloc(sizeof(*this), GFP_KERNEL); 1560 this = kzalloc(sizeof(*this), GFP_KERNEL);
1538 if (!this) { 1561 if (!this) {
1539 pr_err("Failed to allocate per-device memory\n"); 1562 pr_err("Failed to allocate per-device memory\n");
@@ -1543,13 +1566,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1543 platform_set_drvdata(pdev, this); 1566 platform_set_drvdata(pdev, this);
1544 this->pdev = pdev; 1567 this->pdev = pdev;
1545 this->dev = &pdev->dev; 1568 this->dev = &pdev->dev;
1546 this->pdata = pdata;
1547
1548 if (pdata->platform_init) {
1549 ret = pdata->platform_init();
1550 if (ret)
1551 goto platform_init_error;
1552 }
1553 1569
1554 ret = acquire_resources(this); 1570 ret = acquire_resources(this);
1555 if (ret) 1571 if (ret)
@@ -1567,7 +1583,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1567 1583
1568exit_nfc_init: 1584exit_nfc_init:
1569 release_resources(this); 1585 release_resources(this);
1570platform_init_error:
1571exit_acquire_resources: 1586exit_acquire_resources:
1572 platform_set_drvdata(pdev, NULL); 1587 platform_set_drvdata(pdev, NULL);
1573 kfree(this); 1588 kfree(this);
@@ -1585,19 +1600,10 @@ static int __exit gpmi_nand_remove(struct platform_device *pdev)
1585 return 0; 1600 return 0;
1586} 1601}
1587 1602
1588static const struct platform_device_id gpmi_ids[] = {
1589 {
1590 .name = "imx23-gpmi-nand",
1591 .driver_data = IS_MX23,
1592 }, {
1593 .name = "imx28-gpmi-nand",
1594 .driver_data = IS_MX28,
1595 }, {},
1596};
1597
1598static struct platform_driver gpmi_nand_driver = { 1603static struct platform_driver gpmi_nand_driver = {
1599 .driver = { 1604 .driver = {
1600 .name = "gpmi-nand", 1605 .name = "gpmi-nand",
1606 .of_match_table = gpmi_nand_id_table,
1601 }, 1607 },
1602 .probe = gpmi_nand_probe, 1608 .probe = gpmi_nand_probe,
1603 .remove = __exit_p(gpmi_nand_remove), 1609 .remove = __exit_p(gpmi_nand_remove),
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index ec6180d4ff8f..ce5daa160920 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -266,8 +266,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
266#define STATUS_UNCORRECTABLE 0xfe 266#define STATUS_UNCORRECTABLE 0xfe
267 267
268/* Use the platform_id to distinguish different Archs. */ 268/* Use the platform_id to distinguish different Archs. */
269#define IS_MX23 0x1 269#define IS_MX23 0x0
270#define IS_MX28 0x2 270#define IS_MX28 0x1
271#define IS_MX6Q 0x2
271#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23) 272#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
272#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28) 273#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
274#define GPMI_IS_MX6Q(x) ((x)->pdev->id_entry->driver_data == IS_MX6Q)
273#endif 275#endif
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 9bf5ce5fa22d..50166e93ba96 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -124,7 +124,6 @@ static int __init h1910_init(void)
124 /* 15 us command delay time */ 124 /* 15 us command delay time */
125 this->chip_delay = 50; 125 this->chip_delay = 50;
126 this->ecc.mode = NAND_ECC_SOFT; 126 this->ecc.mode = NAND_ECC_SOFT;
127 this->options = NAND_NO_AUTOINCR;
128 127
129 /* Scan to find existence of the device */ 128 /* Scan to find existence of the device */
130 if (nand_scan(h1910_nand_mtd, 1)) { 129 if (nand_scan(h1910_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index e4147e8acb7c..a6fa884ae49b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,11 +332,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
332 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; 332 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
333 chip->ecc.size = 512; 333 chip->ecc.size = 512;
334 chip->ecc.bytes = 9; 334 chip->ecc.bytes = 9;
335 chip->ecc.strength = 2; 335 chip->ecc.strength = 4;
336 /*
337 * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
338 * conservative guess, given 9 ecc bytes and reed-solomon alg.
339 */
340 336
341 if (pdata) 337 if (pdata)
342 chip->ecc.layout = pdata->ecc_layout; 338 chip->ecc.layout = pdata->ecc_layout;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index c240cf1af961..c259c24d7986 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -734,7 +734,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
734 chip->write_buf = mpc5121_nfc_write_buf; 734 chip->write_buf = mpc5121_nfc_write_buf;
735 chip->verify_buf = mpc5121_nfc_verify_buf; 735 chip->verify_buf = mpc5121_nfc_verify_buf;
736 chip->select_chip = mpc5121_nfc_select_chip; 736 chip->select_chip = mpc5121_nfc_select_chip;
737 chip->options = NAND_NO_AUTOINCR;
738 chip->bbt_options = NAND_BBT_USE_FLASH; 737 chip->bbt_options = NAND_BBT_USE_FLASH;
739 chip->ecc.mode = NAND_ECC_SOFT; 738 chip->ecc.mode = NAND_ECC_SOFT;
740 739
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 9e374e9bd296..c58e6a93f445 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,6 +32,8 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/irq.h> 33#include <linux/irq.h>
34#include <linux/completion.h> 34#include <linux/completion.h>
35#include <linux/of_device.h>
36#include <linux/of_mtd.h>
35 37
36#include <asm/mach/flash.h> 38#include <asm/mach/flash.h>
37#include <mach/mxc_nand.h> 39#include <mach/mxc_nand.h>
@@ -140,13 +142,47 @@
140 142
141#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34) 143#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
142 144
145struct mxc_nand_host;
146
147struct mxc_nand_devtype_data {
148 void (*preset)(struct mtd_info *);
149 void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
150 void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
151 void (*send_page)(struct mtd_info *, unsigned int);
152 void (*send_read_id)(struct mxc_nand_host *);
153 uint16_t (*get_dev_status)(struct mxc_nand_host *);
154 int (*check_int)(struct mxc_nand_host *);
155 void (*irq_control)(struct mxc_nand_host *, int);
156 u32 (*get_ecc_status)(struct mxc_nand_host *);
157 struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
158 void (*select_chip)(struct mtd_info *mtd, int chip);
159 int (*correct_data)(struct mtd_info *mtd, u_char *dat,
160 u_char *read_ecc, u_char *calc_ecc);
161
162 /*
163 * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
164 * (CONFIG1:INT_MSK is set). To handle this the driver uses
165 * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
166 */
167 int irqpending_quirk;
168 int needs_ip;
169
170 size_t regs_offset;
171 size_t spare0_offset;
172 size_t axi_offset;
173
174 int spare_len;
175 int eccbytes;
176 int eccsize;
177};
178
143struct mxc_nand_host { 179struct mxc_nand_host {
144 struct mtd_info mtd; 180 struct mtd_info mtd;
145 struct nand_chip nand; 181 struct nand_chip nand;
146 struct device *dev; 182 struct device *dev;
147 183
148 void *spare0; 184 void __iomem *spare0;
149 void *main_area0; 185 void __iomem *main_area0;
150 186
151 void __iomem *base; 187 void __iomem *base;
152 void __iomem *regs; 188 void __iomem *regs;
@@ -163,16 +199,9 @@ struct mxc_nand_host {
163 199
164 uint8_t *data_buf; 200 uint8_t *data_buf;
165 unsigned int buf_start; 201 unsigned int buf_start;
166 int spare_len; 202
167 203 const struct mxc_nand_devtype_data *devtype_data;
168 void (*preset)(struct mtd_info *); 204 struct mxc_nand_platform_data pdata;
169 void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
170 void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
171 void (*send_page)(struct mtd_info *, unsigned int);
172 void (*send_read_id)(struct mxc_nand_host *);
173 uint16_t (*get_dev_status)(struct mxc_nand_host *);
174 int (*check_int)(struct mxc_nand_host *);
175 void (*irq_control)(struct mxc_nand_host *, int);
176}; 205};
177 206
178/* OOB placement block for use with hardware ecc generation */ 207/* OOB placement block for use with hardware ecc generation */
@@ -242,21 +271,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
242 } 271 }
243}; 272};
244 273
245static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; 274static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
246
247static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
248{
249 struct mxc_nand_host *host = dev_id;
250
251 if (!host->check_int(host))
252 return IRQ_NONE;
253
254 host->irq_control(host, 0);
255
256 complete(&host->op_completion);
257
258 return IRQ_HANDLED;
259}
260 275
261static int check_int_v3(struct mxc_nand_host *host) 276static int check_int_v3(struct mxc_nand_host *host)
262{ 277{
@@ -280,26 +295,12 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
280 if (!(tmp & NFC_V1_V2_CONFIG2_INT)) 295 if (!(tmp & NFC_V1_V2_CONFIG2_INT))
281 return 0; 296 return 0;
282 297
283 if (!cpu_is_mx21()) 298 if (!host->devtype_data->irqpending_quirk)
284 writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); 299 writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
285 300
286 return 1; 301 return 1;
287} 302}
288 303
289/*
290 * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
291 * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
292 * driver can enable/disable the irq line rather than simply masking the
293 * interrupts.
294 */
295static void irq_control_mx21(struct mxc_nand_host *host, int activate)
296{
297 if (activate)
298 enable_irq(host->irq);
299 else
300 disable_irq_nosync(host->irq);
301}
302
303static void irq_control_v1_v2(struct mxc_nand_host *host, int activate) 304static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
304{ 305{
305 uint16_t tmp; 306 uint16_t tmp;
@@ -328,6 +329,47 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate)
328 writel(tmp, NFC_V3_CONFIG2); 329 writel(tmp, NFC_V3_CONFIG2);
329} 330}
330 331
332static void irq_control(struct mxc_nand_host *host, int activate)
333{
334 if (host->devtype_data->irqpending_quirk) {
335 if (activate)
336 enable_irq(host->irq);
337 else
338 disable_irq_nosync(host->irq);
339 } else {
340 host->devtype_data->irq_control(host, activate);
341 }
342}
343
344static u32 get_ecc_status_v1(struct mxc_nand_host *host)
345{
346 return readw(NFC_V1_V2_ECC_STATUS_RESULT);
347}
348
349static u32 get_ecc_status_v2(struct mxc_nand_host *host)
350{
351 return readl(NFC_V1_V2_ECC_STATUS_RESULT);
352}
353
354static u32 get_ecc_status_v3(struct mxc_nand_host *host)
355{
356 return readl(NFC_V3_ECC_STATUS_RESULT);
357}
358
359static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
360{
361 struct mxc_nand_host *host = dev_id;
362
363 if (!host->devtype_data->check_int(host))
364 return IRQ_NONE;
365
366 irq_control(host, 0);
367
368 complete(&host->op_completion);
369
370 return IRQ_HANDLED;
371}
372
331/* This function polls the NANDFC to wait for the basic operation to 373/* This function polls the NANDFC to wait for the basic operation to
332 * complete by checking the INT bit of config2 register. 374 * complete by checking the INT bit of config2 register.
333 */ 375 */
@@ -336,14 +378,14 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
336 int max_retries = 8000; 378 int max_retries = 8000;
337 379
338 if (useirq) { 380 if (useirq) {
339 if (!host->check_int(host)) { 381 if (!host->devtype_data->check_int(host)) {
340 INIT_COMPLETION(host->op_completion); 382 INIT_COMPLETION(host->op_completion);
341 host->irq_control(host, 1); 383 irq_control(host, 1);
342 wait_for_completion(&host->op_completion); 384 wait_for_completion(&host->op_completion);
343 } 385 }
344 } else { 386 } else {
345 while (max_retries-- > 0) { 387 while (max_retries-- > 0) {
346 if (host->check_int(host)) 388 if (host->devtype_data->check_int(host))
347 break; 389 break;
348 390
349 udelay(1); 391 udelay(1);
@@ -374,7 +416,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
374 writew(cmd, NFC_V1_V2_FLASH_CMD); 416 writew(cmd, NFC_V1_V2_FLASH_CMD);
375 writew(NFC_CMD, NFC_V1_V2_CONFIG2); 417 writew(NFC_CMD, NFC_V1_V2_CONFIG2);
376 418
377 if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) { 419 if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
378 int max_retries = 100; 420 int max_retries = 100;
379 /* Reset completion is indicated by NFC_CONFIG2 */ 421 /* Reset completion is indicated by NFC_CONFIG2 */
380 /* being set to 0 */ 422 /* being set to 0 */
@@ -433,13 +475,27 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
433 wait_op_done(host, false); 475 wait_op_done(host, false);
434} 476}
435 477
436static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops) 478static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
479{
480 struct nand_chip *nand_chip = mtd->priv;
481 struct mxc_nand_host *host = nand_chip->priv;
482
483 /* NANDFC buffer 0 is used for page read/write */
484 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
485
486 writew(ops, NFC_V1_V2_CONFIG2);
487
488 /* Wait for operation to complete */
489 wait_op_done(host, true);
490}
491
492static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
437{ 493{
438 struct nand_chip *nand_chip = mtd->priv; 494 struct nand_chip *nand_chip = mtd->priv;
439 struct mxc_nand_host *host = nand_chip->priv; 495 struct mxc_nand_host *host = nand_chip->priv;
440 int bufs, i; 496 int bufs, i;
441 497
442 if (nfc_is_v1() && mtd->writesize > 512) 498 if (mtd->writesize > 512)
443 bufs = 4; 499 bufs = 4;
444 else 500 else
445 bufs = 1; 501 bufs = 1;
@@ -463,7 +519,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
463 519
464 wait_op_done(host, true); 520 wait_op_done(host, true);
465 521
466 memcpy(host->data_buf, host->main_area0, 16); 522 memcpy_fromio(host->data_buf, host->main_area0, 16);
467} 523}
468 524
469/* Request the NANDFC to perform a read of the NAND device ID. */ 525/* Request the NANDFC to perform a read of the NAND device ID. */
@@ -479,7 +535,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
479 /* Wait for operation to complete */ 535 /* Wait for operation to complete */
480 wait_op_done(host, true); 536 wait_op_done(host, true);
481 537
482 memcpy(host->data_buf, host->main_area0, 16); 538 memcpy_fromio(host->data_buf, host->main_area0, 16);
483 539
484 if (this->options & NAND_BUSWIDTH_16) { 540 if (this->options & NAND_BUSWIDTH_16) {
485 /* compress the ID info */ 541 /* compress the ID info */
@@ -555,7 +611,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
555 * additional correction. 2-Bit errors cannot be corrected by 611 * additional correction. 2-Bit errors cannot be corrected by
556 * HW ECC, so we need to return failure 612 * HW ECC, so we need to return failure
557 */ 613 */
558 uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); 614 uint16_t ecc_status = get_ecc_status_v1(host);
559 615
560 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { 616 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
561 pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); 617 pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
@@ -580,10 +636,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
580 636
581 no_subpages = mtd->writesize >> 9; 637 no_subpages = mtd->writesize >> 9;
582 638
583 if (nfc_is_v21()) 639 ecc_stat = host->devtype_data->get_ecc_status(host);
584 ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
585 else
586 ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
587 640
588 do { 641 do {
589 err = ecc_stat & ecc_bit_mask; 642 err = ecc_stat & ecc_bit_mask;
@@ -616,7 +669,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
616 669
617 /* Check for status request */ 670 /* Check for status request */
618 if (host->status_request) 671 if (host->status_request)
619 return host->get_dev_status(host) & 0xFF; 672 return host->devtype_data->get_dev_status(host) & 0xFF;
620 673
621 ret = *(uint8_t *)(host->data_buf + host->buf_start); 674 ret = *(uint8_t *)(host->data_buf + host->buf_start);
622 host->buf_start++; 675 host->buf_start++;
@@ -682,7 +735,7 @@ static int mxc_nand_verify_buf(struct mtd_info *mtd,
682 735
683/* This function is used by upper layer for select and 736/* This function is used by upper layer for select and
684 * deselect of the NAND chip */ 737 * deselect of the NAND chip */
685static void mxc_nand_select_chip(struct mtd_info *mtd, int chip) 738static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
686{ 739{
687 struct nand_chip *nand_chip = mtd->priv; 740 struct nand_chip *nand_chip = mtd->priv;
688 struct mxc_nand_host *host = nand_chip->priv; 741 struct mxc_nand_host *host = nand_chip->priv;
@@ -701,11 +754,30 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
701 clk_prepare_enable(host->clk); 754 clk_prepare_enable(host->clk);
702 host->clk_act = 1; 755 host->clk_act = 1;
703 } 756 }
757}
704 758
705 if (nfc_is_v21()) { 759static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
706 host->active_cs = chip; 760{
707 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); 761 struct nand_chip *nand_chip = mtd->priv;
762 struct mxc_nand_host *host = nand_chip->priv;
763
764 if (chip == -1) {
765 /* Disable the NFC clock */
766 if (host->clk_act) {
767 clk_disable(host->clk);
768 host->clk_act = 0;
769 }
770 return;
771 }
772
773 if (!host->clk_act) {
774 /* Enable the NFC clock */
775 clk_enable(host->clk);
776 host->clk_act = 1;
708 } 777 }
778
779 host->active_cs = chip;
780 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
709} 781}
710 782
711/* 783/*
@@ -718,23 +790,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
718 u16 i, j; 790 u16 i, j;
719 u16 n = mtd->writesize >> 9; 791 u16 n = mtd->writesize >> 9;
720 u8 *d = host->data_buf + mtd->writesize; 792 u8 *d = host->data_buf + mtd->writesize;
721 u8 *s = host->spare0; 793 u8 __iomem *s = host->spare0;
722 u16 t = host->spare_len; 794 u16 t = host->devtype_data->spare_len;
723 795
724 j = (mtd->oobsize / n >> 1) << 1; 796 j = (mtd->oobsize / n >> 1) << 1;
725 797
726 if (bfrom) { 798 if (bfrom) {
727 for (i = 0; i < n - 1; i++) 799 for (i = 0; i < n - 1; i++)
728 memcpy(d + i * j, s + i * t, j); 800 memcpy_fromio(d + i * j, s + i * t, j);
729 801
730 /* the last section */ 802 /* the last section */
731 memcpy(d + i * j, s + i * t, mtd->oobsize - i * j); 803 memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
732 } else { 804 } else {
733 for (i = 0; i < n - 1; i++) 805 for (i = 0; i < n - 1; i++)
734 memcpy(&s[i * t], &d[i * j], j); 806 memcpy_toio(&s[i * t], &d[i * j], j);
735 807
736 /* the last section */ 808 /* the last section */
737 memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j); 809 memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
738 } 810 }
739} 811}
740 812
@@ -751,34 +823,44 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
751 * perform a read/write buf operation, the saved column 823 * perform a read/write buf operation, the saved column
752 * address is used to index into the full page. 824 * address is used to index into the full page.
753 */ 825 */
754 host->send_addr(host, 0, page_addr == -1); 826 host->devtype_data->send_addr(host, 0, page_addr == -1);
755 if (mtd->writesize > 512) 827 if (mtd->writesize > 512)
756 /* another col addr cycle for 2k page */ 828 /* another col addr cycle for 2k page */
757 host->send_addr(host, 0, false); 829 host->devtype_data->send_addr(host, 0, false);
758 } 830 }
759 831
760 /* Write out page address, if necessary */ 832 /* Write out page address, if necessary */
761 if (page_addr != -1) { 833 if (page_addr != -1) {
762 /* paddr_0 - p_addr_7 */ 834 /* paddr_0 - p_addr_7 */
763 host->send_addr(host, (page_addr & 0xff), false); 835 host->devtype_data->send_addr(host, (page_addr & 0xff), false);
764 836
765 if (mtd->writesize > 512) { 837 if (mtd->writesize > 512) {
766 if (mtd->size >= 0x10000000) { 838 if (mtd->size >= 0x10000000) {
767 /* paddr_8 - paddr_15 */ 839 /* paddr_8 - paddr_15 */
768 host->send_addr(host, (page_addr >> 8) & 0xff, false); 840 host->devtype_data->send_addr(host,
769 host->send_addr(host, (page_addr >> 16) & 0xff, true); 841 (page_addr >> 8) & 0xff,
842 false);
843 host->devtype_data->send_addr(host,
844 (page_addr >> 16) & 0xff,
845 true);
770 } else 846 } else
771 /* paddr_8 - paddr_15 */ 847 /* paddr_8 - paddr_15 */
772 host->send_addr(host, (page_addr >> 8) & 0xff, true); 848 host->devtype_data->send_addr(host,
849 (page_addr >> 8) & 0xff, true);
773 } else { 850 } else {
774 /* One more address cycle for higher density devices */ 851 /* One more address cycle for higher density devices */
775 if (mtd->size >= 0x4000000) { 852 if (mtd->size >= 0x4000000) {
776 /* paddr_8 - paddr_15 */ 853 /* paddr_8 - paddr_15 */
777 host->send_addr(host, (page_addr >> 8) & 0xff, false); 854 host->devtype_data->send_addr(host,
778 host->send_addr(host, (page_addr >> 16) & 0xff, true); 855 (page_addr >> 8) & 0xff,
856 false);
857 host->devtype_data->send_addr(host,
858 (page_addr >> 16) & 0xff,
859 true);
779 } else 860 } else
780 /* paddr_8 - paddr_15 */ 861 /* paddr_8 - paddr_15 */
781 host->send_addr(host, (page_addr >> 8) & 0xff, true); 862 host->devtype_data->send_addr(host,
863 (page_addr >> 8) & 0xff, true);
782 } 864 }
783 } 865 }
784} 866}
@@ -800,7 +882,35 @@ static int get_eccsize(struct mtd_info *mtd)
800 return 8; 882 return 8;
801} 883}
802 884
803static void preset_v1_v2(struct mtd_info *mtd) 885static void preset_v1(struct mtd_info *mtd)
886{
887 struct nand_chip *nand_chip = mtd->priv;
888 struct mxc_nand_host *host = nand_chip->priv;
889 uint16_t config1 = 0;
890
891 if (nand_chip->ecc.mode == NAND_ECC_HW)
892 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
893
894 if (!host->devtype_data->irqpending_quirk)
895 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
896
897 host->eccsize = 1;
898
899 writew(config1, NFC_V1_V2_CONFIG1);
900 /* preset operation */
901
902 /* Unlock the internal RAM Buffer */
903 writew(0x2, NFC_V1_V2_CONFIG);
904
905 /* Blocks to be unlocked */
906 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
907 writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
908
909 /* Unlock Block Command for given address range */
910 writew(0x4, NFC_V1_V2_WRPROT);
911}
912
913static void preset_v2(struct mtd_info *mtd)
804{ 914{
805 struct nand_chip *nand_chip = mtd->priv; 915 struct nand_chip *nand_chip = mtd->priv;
806 struct mxc_nand_host *host = nand_chip->priv; 916 struct mxc_nand_host *host = nand_chip->priv;
@@ -809,13 +919,12 @@ static void preset_v1_v2(struct mtd_info *mtd)
809 if (nand_chip->ecc.mode == NAND_ECC_HW) 919 if (nand_chip->ecc.mode == NAND_ECC_HW)
810 config1 |= NFC_V1_V2_CONFIG1_ECC_EN; 920 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
811 921
812 if (nfc_is_v21()) 922 config1 |= NFC_V2_CONFIG1_FP_INT;
813 config1 |= NFC_V2_CONFIG1_FP_INT;
814 923
815 if (!cpu_is_mx21()) 924 if (!host->devtype_data->irqpending_quirk)
816 config1 |= NFC_V1_V2_CONFIG1_INT_MSK; 925 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
817 926
818 if (nfc_is_v21() && mtd->writesize) { 927 if (mtd->writesize) {
819 uint16_t pages_per_block = mtd->erasesize / mtd->writesize; 928 uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
820 929
821 host->eccsize = get_eccsize(mtd); 930 host->eccsize = get_eccsize(mtd);
@@ -834,20 +943,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
834 writew(0x2, NFC_V1_V2_CONFIG); 943 writew(0x2, NFC_V1_V2_CONFIG);
835 944
836 /* Blocks to be unlocked */ 945 /* Blocks to be unlocked */
837 if (nfc_is_v21()) { 946 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
838 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0); 947 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
839 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1); 948 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
840 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2); 949 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
841 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3); 950 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
842 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0); 951 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
843 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1); 952 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
844 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2); 953 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
845 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
846 } else if (nfc_is_v1()) {
847 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
848 writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
849 } else
850 BUG();
851 954
852 /* Unlock Block Command for given address range */ 955 /* Unlock Block Command for given address range */
853 writew(0x4, NFC_V1_V2_WRPROT); 956 writew(0x4, NFC_V1_V2_WRPROT);
@@ -937,15 +1040,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
937 /* Command pre-processing step */ 1040 /* Command pre-processing step */
938 switch (command) { 1041 switch (command) {
939 case NAND_CMD_RESET: 1042 case NAND_CMD_RESET:
940 host->preset(mtd); 1043 host->devtype_data->preset(mtd);
941 host->send_cmd(host, command, false); 1044 host->devtype_data->send_cmd(host, command, false);
942 break; 1045 break;
943 1046
944 case NAND_CMD_STATUS: 1047 case NAND_CMD_STATUS:
945 host->buf_start = 0; 1048 host->buf_start = 0;
946 host->status_request = true; 1049 host->status_request = true;
947 1050
948 host->send_cmd(host, command, true); 1051 host->devtype_data->send_cmd(host, command, true);
949 mxc_do_addr_cycle(mtd, column, page_addr); 1052 mxc_do_addr_cycle(mtd, column, page_addr);
950 break; 1053 break;
951 1054
@@ -958,15 +1061,16 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
958 1061
959 command = NAND_CMD_READ0; /* only READ0 is valid */ 1062 command = NAND_CMD_READ0; /* only READ0 is valid */
960 1063
961 host->send_cmd(host, command, false); 1064 host->devtype_data->send_cmd(host, command, false);
962 mxc_do_addr_cycle(mtd, column, page_addr); 1065 mxc_do_addr_cycle(mtd, column, page_addr);
963 1066
964 if (mtd->writesize > 512) 1067 if (mtd->writesize > 512)
965 host->send_cmd(host, NAND_CMD_READSTART, true); 1068 host->devtype_data->send_cmd(host,
1069 NAND_CMD_READSTART, true);
966 1070
967 host->send_page(mtd, NFC_OUTPUT); 1071 host->devtype_data->send_page(mtd, NFC_OUTPUT);
968 1072
969 memcpy(host->data_buf, host->main_area0, mtd->writesize); 1073 memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize);
970 copy_spare(mtd, true); 1074 copy_spare(mtd, true);
971 break; 1075 break;
972 1076
@@ -977,28 +1081,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
977 1081
978 host->buf_start = column; 1082 host->buf_start = column;
979 1083
980 host->send_cmd(host, command, false); 1084 host->devtype_data->send_cmd(host, command, false);
981 mxc_do_addr_cycle(mtd, column, page_addr); 1085 mxc_do_addr_cycle(mtd, column, page_addr);
982 break; 1086 break;
983 1087
984 case NAND_CMD_PAGEPROG: 1088 case NAND_CMD_PAGEPROG:
985 memcpy(host->main_area0, host->data_buf, mtd->writesize); 1089 memcpy_toio(host->main_area0, host->data_buf, mtd->writesize);
986 copy_spare(mtd, false); 1090 copy_spare(mtd, false);
987 host->send_page(mtd, NFC_INPUT); 1091 host->devtype_data->send_page(mtd, NFC_INPUT);
988 host->send_cmd(host, command, true); 1092 host->devtype_data->send_cmd(host, command, true);
989 mxc_do_addr_cycle(mtd, column, page_addr); 1093 mxc_do_addr_cycle(mtd, column, page_addr);
990 break; 1094 break;
991 1095
992 case NAND_CMD_READID: 1096 case NAND_CMD_READID:
993 host->send_cmd(host, command, true); 1097 host->devtype_data->send_cmd(host, command, true);
994 mxc_do_addr_cycle(mtd, column, page_addr); 1098 mxc_do_addr_cycle(mtd, column, page_addr);
995 host->send_read_id(host); 1099 host->devtype_data->send_read_id(host);
996 host->buf_start = column; 1100 host->buf_start = column;
997 break; 1101 break;
998 1102
999 case NAND_CMD_ERASE1: 1103 case NAND_CMD_ERASE1:
1000 case NAND_CMD_ERASE2: 1104 case NAND_CMD_ERASE2:
1001 host->send_cmd(host, command, false); 1105 host->devtype_data->send_cmd(host, command, false);
1002 mxc_do_addr_cycle(mtd, column, page_addr); 1106 mxc_do_addr_cycle(mtd, column, page_addr);
1003 1107
1004 break; 1108 break;
@@ -1032,15 +1136,191 @@ static struct nand_bbt_descr bbt_mirror_descr = {
1032 .pattern = mirror_pattern, 1136 .pattern = mirror_pattern,
1033}; 1137};
1034 1138
1139/* v1 + irqpending_quirk: i.MX21 */
1140static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
1141 .preset = preset_v1,
1142 .send_cmd = send_cmd_v1_v2,
1143 .send_addr = send_addr_v1_v2,
1144 .send_page = send_page_v1,
1145 .send_read_id = send_read_id_v1_v2,
1146 .get_dev_status = get_dev_status_v1_v2,
1147 .check_int = check_int_v1_v2,
1148 .irq_control = irq_control_v1_v2,
1149 .get_ecc_status = get_ecc_status_v1,
1150 .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
1151 .ecclayout_2k = &nandv1_hw_eccoob_largepage,
1152 .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
1153 .select_chip = mxc_nand_select_chip_v1_v3,
1154 .correct_data = mxc_nand_correct_data_v1,
1155 .irqpending_quirk = 1,
1156 .needs_ip = 0,
1157 .regs_offset = 0xe00,
1158 .spare0_offset = 0x800,
1159 .spare_len = 16,
1160 .eccbytes = 3,
1161 .eccsize = 1,
1162};
1163
1164/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
1165static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
1166 .preset = preset_v1,
1167 .send_cmd = send_cmd_v1_v2,
1168 .send_addr = send_addr_v1_v2,
1169 .send_page = send_page_v1,
1170 .send_read_id = send_read_id_v1_v2,
1171 .get_dev_status = get_dev_status_v1_v2,
1172 .check_int = check_int_v1_v2,
1173 .irq_control = irq_control_v1_v2,
1174 .get_ecc_status = get_ecc_status_v1,
1175 .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
1176 .ecclayout_2k = &nandv1_hw_eccoob_largepage,
1177 .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
1178 .select_chip = mxc_nand_select_chip_v1_v3,
1179 .correct_data = mxc_nand_correct_data_v1,
1180 .irqpending_quirk = 0,
1181 .needs_ip = 0,
1182 .regs_offset = 0xe00,
1183 .spare0_offset = 0x800,
1184 .axi_offset = 0,
1185 .spare_len = 16,
1186 .eccbytes = 3,
1187 .eccsize = 1,
1188};
1189
1190/* v21: i.MX25, i.MX35 */
1191static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
1192 .preset = preset_v2,
1193 .send_cmd = send_cmd_v1_v2,
1194 .send_addr = send_addr_v1_v2,
1195 .send_page = send_page_v2,
1196 .send_read_id = send_read_id_v1_v2,
1197 .get_dev_status = get_dev_status_v1_v2,
1198 .check_int = check_int_v1_v2,
1199 .irq_control = irq_control_v1_v2,
1200 .get_ecc_status = get_ecc_status_v2,
1201 .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
1202 .ecclayout_2k = &nandv2_hw_eccoob_largepage,
1203 .ecclayout_4k = &nandv2_hw_eccoob_4k,
1204 .select_chip = mxc_nand_select_chip_v2,
1205 .correct_data = mxc_nand_correct_data_v2_v3,
1206 .irqpending_quirk = 0,
1207 .needs_ip = 0,
1208 .regs_offset = 0x1e00,
1209 .spare0_offset = 0x1000,
1210 .axi_offset = 0,
1211 .spare_len = 64,
1212 .eccbytes = 9,
1213 .eccsize = 0,
1214};
1215
1216/* v3: i.MX51, i.MX53 */
1217static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
1218 .preset = preset_v3,
1219 .send_cmd = send_cmd_v3,
1220 .send_addr = send_addr_v3,
1221 .send_page = send_page_v3,
1222 .send_read_id = send_read_id_v3,
1223 .get_dev_status = get_dev_status_v3,
1224 .check_int = check_int_v3,
1225 .irq_control = irq_control_v3,
1226 .get_ecc_status = get_ecc_status_v3,
1227 .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
1228 .ecclayout_2k = &nandv2_hw_eccoob_largepage,
1229 .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
1230 .select_chip = mxc_nand_select_chip_v1_v3,
1231 .correct_data = mxc_nand_correct_data_v2_v3,
1232 .irqpending_quirk = 0,
1233 .needs_ip = 1,
1234 .regs_offset = 0,
1235 .spare0_offset = 0x1000,
1236 .axi_offset = 0x1e00,
1237 .spare_len = 64,
1238 .eccbytes = 0,
1239 .eccsize = 0,
1240};
1241
1242#ifdef CONFIG_OF_MTD
1243static const struct of_device_id mxcnd_dt_ids[] = {
1244 {
1245 .compatible = "fsl,imx21-nand",
1246 .data = &imx21_nand_devtype_data,
1247 }, {
1248 .compatible = "fsl,imx27-nand",
1249 .data = &imx27_nand_devtype_data,
1250 }, {
1251 .compatible = "fsl,imx25-nand",
1252 .data = &imx25_nand_devtype_data,
1253 }, {
1254 .compatible = "fsl,imx51-nand",
1255 .data = &imx51_nand_devtype_data,
1256 },
1257 { /* sentinel */ }
1258};
1259
1260static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
1261{
1262 struct device_node *np = host->dev->of_node;
1263 struct mxc_nand_platform_data *pdata = &host->pdata;
1264 const struct of_device_id *of_id =
1265 of_match_device(mxcnd_dt_ids, host->dev);
1266 int buswidth;
1267
1268 if (!np)
1269 return 1;
1270
1271 if (of_get_nand_ecc_mode(np) >= 0)
1272 pdata->hw_ecc = 1;
1273
1274 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1275
1276 buswidth = of_get_nand_bus_width(np);
1277 if (buswidth < 0)
1278 return buswidth;
1279
1280 pdata->width = buswidth / 8;
1281
1282 host->devtype_data = of_id->data;
1283
1284 return 0;
1285}
1286#else
1287static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
1288{
1289 return 1;
1290}
1291#endif
1292
1293static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
1294{
1295 struct mxc_nand_platform_data *pdata = host->dev->platform_data;
1296
1297 if (!pdata)
1298 return -ENODEV;
1299
1300 host->pdata = *pdata;
1301
1302 if (nfc_is_v1()) {
1303 if (cpu_is_mx21())
1304 host->devtype_data = &imx21_nand_devtype_data;
1305 else
1306 host->devtype_data = &imx27_nand_devtype_data;
1307 } else if (nfc_is_v21()) {
1308 host->devtype_data = &imx25_nand_devtype_data;
1309 } else if (nfc_is_v3_2()) {
1310 host->devtype_data = &imx51_nand_devtype_data;
1311 } else
1312 BUG();
1313
1314 return 0;
1315}
1316
1035static int __init mxcnd_probe(struct platform_device *pdev) 1317static int __init mxcnd_probe(struct platform_device *pdev)
1036{ 1318{
1037 struct nand_chip *this; 1319 struct nand_chip *this;
1038 struct mtd_info *mtd; 1320 struct mtd_info *mtd;
1039 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
1040 struct mxc_nand_host *host; 1321 struct mxc_nand_host *host;
1041 struct resource *res; 1322 struct resource *res;
1042 int err = 0; 1323 int err = 0;
1043 struct nand_ecclayout *oob_smallpage, *oob_largepage;
1044 1324
1045 /* Allocate memory for MTD device structure and private data */ 1325 /* Allocate memory for MTD device structure and private data */
1046 host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE + 1326 host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
@@ -1065,7 +1345,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1065 this->priv = host; 1345 this->priv = host;
1066 this->dev_ready = mxc_nand_dev_ready; 1346 this->dev_ready = mxc_nand_dev_ready;
1067 this->cmdfunc = mxc_nand_command; 1347 this->cmdfunc = mxc_nand_command;
1068 this->select_chip = mxc_nand_select_chip;
1069 this->read_byte = mxc_nand_read_byte; 1348 this->read_byte = mxc_nand_read_byte;
1070 this->read_word = mxc_nand_read_word; 1349 this->read_word = mxc_nand_read_word;
1071 this->write_buf = mxc_nand_write_buf; 1350 this->write_buf = mxc_nand_write_buf;
@@ -1095,36 +1374,26 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1095 1374
1096 host->main_area0 = host->base; 1375 host->main_area0 = host->base;
1097 1376
1098 if (nfc_is_v1() || nfc_is_v21()) { 1377 err = mxcnd_probe_dt(host);
1099 host->preset = preset_v1_v2; 1378 if (err > 0)
1100 host->send_cmd = send_cmd_v1_v2; 1379 err = mxcnd_probe_pdata(host);
1101 host->send_addr = send_addr_v1_v2; 1380 if (err < 0)
1102 host->send_page = send_page_v1_v2; 1381 goto eirq;
1103 host->send_read_id = send_read_id_v1_v2;
1104 host->get_dev_status = get_dev_status_v1_v2;
1105 host->check_int = check_int_v1_v2;
1106 if (cpu_is_mx21())
1107 host->irq_control = irq_control_mx21;
1108 else
1109 host->irq_control = irq_control_v1_v2;
1110 }
1111 1382
1112 if (nfc_is_v21()) { 1383 if (host->devtype_data->regs_offset)
1113 host->regs = host->base + 0x1e00; 1384 host->regs = host->base + host->devtype_data->regs_offset;
1114 host->spare0 = host->base + 0x1000; 1385 host->spare0 = host->base + host->devtype_data->spare0_offset;
1115 host->spare_len = 64; 1386 if (host->devtype_data->axi_offset)
1116 oob_smallpage = &nandv2_hw_eccoob_smallpage; 1387 host->regs_axi = host->base + host->devtype_data->axi_offset;
1117 oob_largepage = &nandv2_hw_eccoob_largepage; 1388
1118 this->ecc.bytes = 9; 1389 this->ecc.bytes = host->devtype_data->eccbytes;
1119 } else if (nfc_is_v1()) { 1390 host->eccsize = host->devtype_data->eccsize;
1120 host->regs = host->base + 0xe00; 1391
1121 host->spare0 = host->base + 0x800; 1392 this->select_chip = host->devtype_data->select_chip;
1122 host->spare_len = 16; 1393 this->ecc.size = 512;
1123 oob_smallpage = &nandv1_hw_eccoob_smallpage; 1394 this->ecc.layout = host->devtype_data->ecclayout_512;
1124 oob_largepage = &nandv1_hw_eccoob_largepage; 1395
1125 this->ecc.bytes = 3; 1396 if (host->devtype_data->needs_ip) {
1126 host->eccsize = 1;
1127 } else if (nfc_is_v3_2()) {
1128 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1397 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1129 if (!res) { 1398 if (!res) {
1130 err = -ENODEV; 1399 err = -ENODEV;
@@ -1135,42 +1404,22 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1135 err = -ENOMEM; 1404 err = -ENOMEM;
1136 goto eirq; 1405 goto eirq;
1137 } 1406 }
1138 host->regs_axi = host->base + 0x1e00; 1407 }
1139 host->spare0 = host->base + 0x1000;
1140 host->spare_len = 64;
1141 host->preset = preset_v3;
1142 host->send_cmd = send_cmd_v3;
1143 host->send_addr = send_addr_v3;
1144 host->send_page = send_page_v3;
1145 host->send_read_id = send_read_id_v3;
1146 host->check_int = check_int_v3;
1147 host->get_dev_status = get_dev_status_v3;
1148 host->irq_control = irq_control_v3;
1149 oob_smallpage = &nandv2_hw_eccoob_smallpage;
1150 oob_largepage = &nandv2_hw_eccoob_largepage;
1151 } else
1152 BUG();
1153
1154 this->ecc.size = 512;
1155 this->ecc.layout = oob_smallpage;
1156 1408
1157 if (pdata->hw_ecc) { 1409 if (host->pdata.hw_ecc) {
1158 this->ecc.calculate = mxc_nand_calculate_ecc; 1410 this->ecc.calculate = mxc_nand_calculate_ecc;
1159 this->ecc.hwctl = mxc_nand_enable_hwecc; 1411 this->ecc.hwctl = mxc_nand_enable_hwecc;
1160 if (nfc_is_v1()) 1412 this->ecc.correct = host->devtype_data->correct_data;
1161 this->ecc.correct = mxc_nand_correct_data_v1;
1162 else
1163 this->ecc.correct = mxc_nand_correct_data_v2_v3;
1164 this->ecc.mode = NAND_ECC_HW; 1413 this->ecc.mode = NAND_ECC_HW;
1165 } else { 1414 } else {
1166 this->ecc.mode = NAND_ECC_SOFT; 1415 this->ecc.mode = NAND_ECC_SOFT;
1167 } 1416 }
1168 1417
1169 /* NAND bus width determines access funtions used by upper layer */ 1418 /* NAND bus width determines access functions used by upper layer */
1170 if (pdata->width == 2) 1419 if (host->pdata.width == 2)
1171 this->options |= NAND_BUSWIDTH_16; 1420 this->options |= NAND_BUSWIDTH_16;
1172 1421
1173 if (pdata->flash_bbt) { 1422 if (host->pdata.flash_bbt) {
1174 this->bbt_td = &bbt_main_descr; 1423 this->bbt_td = &bbt_main_descr;
1175 this->bbt_md = &bbt_mirror_descr; 1424 this->bbt_md = &bbt_mirror_descr;
1176 /* update flash based bbt */ 1425 /* update flash based bbt */
@@ -1182,28 +1431,25 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1182 host->irq = platform_get_irq(pdev, 0); 1431 host->irq = platform_get_irq(pdev, 0);
1183 1432
1184 /* 1433 /*
1185 * mask the interrupt. For i.MX21 explicitely call 1434 * Use host->devtype_data->irq_control() here instead of irq_control()
1186 * irq_control_v1_v2 to use the mask bit. We can't call 1435 * because we must not disable_irq_nosync without having requested the
1187 * disable_irq_nosync() for an interrupt we do not own yet. 1436 * irq.
1188 */ 1437 */
1189 if (cpu_is_mx21()) 1438 host->devtype_data->irq_control(host, 0);
1190 irq_control_v1_v2(host, 0);
1191 else
1192 host->irq_control(host, 0);
1193 1439
1194 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); 1440 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
1195 if (err) 1441 if (err)
1196 goto eirq; 1442 goto eirq;
1197 1443
1198 host->irq_control(host, 0);
1199
1200 /* 1444 /*
1201 * Now that the interrupt is disabled make sure the interrupt 1445 * Now that we "own" the interrupt make sure the interrupt mask bit is
1202 * mask bit is cleared on i.MX21. Otherwise we can't read 1446 * cleared on i.MX21. Otherwise we can't read the interrupt status bit
1203 * the interrupt status bit on this machine. 1447 * on this machine.
1204 */ 1448 */
1205 if (cpu_is_mx21()) 1449 if (host->devtype_data->irqpending_quirk) {
1206 irq_control_v1_v2(host, 1); 1450 disable_irq_nosync(host->irq);
1451 host->devtype_data->irq_control(host, 1);
1452 }
1207 1453
1208 /* first scan to find the device and get the page size */ 1454 /* first scan to find the device and get the page size */
1209 if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) { 1455 if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
@@ -1212,18 +1458,12 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1212 } 1458 }
1213 1459
1214 /* Call preset again, with correct writesize this time */ 1460 /* Call preset again, with correct writesize this time */
1215 host->preset(mtd); 1461 host->devtype_data->preset(mtd);
1216 1462
1217 if (mtd->writesize == 2048) 1463 if (mtd->writesize == 2048)
1218 this->ecc.layout = oob_largepage; 1464 this->ecc.layout = host->devtype_data->ecclayout_2k;
1219 if (nfc_is_v21() && mtd->writesize == 4096) 1465 else if (mtd->writesize == 4096)
1220 this->ecc.layout = &nandv2_hw_eccoob_4k; 1466 this->ecc.layout = host->devtype_data->ecclayout_4k;
1221
1222 /* second phase scan */
1223 if (nand_scan_tail(mtd)) {
1224 err = -ENXIO;
1225 goto escan;
1226 }
1227 1467
1228 if (this->ecc.mode == NAND_ECC_HW) { 1468 if (this->ecc.mode == NAND_ECC_HW) {
1229 if (nfc_is_v1()) 1469 if (nfc_is_v1())
@@ -1232,9 +1472,19 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1232 this->ecc.strength = (host->eccsize == 4) ? 4 : 8; 1472 this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
1233 } 1473 }
1234 1474
1475 /* second phase scan */
1476 if (nand_scan_tail(mtd)) {
1477 err = -ENXIO;
1478 goto escan;
1479 }
1480
1235 /* Register the partitions */ 1481 /* Register the partitions */
1236 mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts, 1482 mtd_device_parse_register(mtd, part_probes,
1237 pdata->nr_parts); 1483 &(struct mtd_part_parser_data){
1484 .of_node = pdev->dev.of_node,
1485 },
1486 host->pdata.parts,
1487 host->pdata.nr_parts);
1238 1488
1239 platform_set_drvdata(pdev, host); 1489 platform_set_drvdata(pdev, host);
1240 1490
@@ -1275,6 +1525,8 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
1275static struct platform_driver mxcnd_driver = { 1525static struct platform_driver mxcnd_driver = {
1276 .driver = { 1526 .driver = {
1277 .name = DRIVER_NAME, 1527 .name = DRIVER_NAME,
1528 .owner = THIS_MODULE,
1529 .of_match_table = of_match_ptr(mxcnd_dt_ids),
1278 }, 1530 },
1279 .remove = __devexit_p(mxcnd_remove), 1531 .remove = __devexit_p(mxcnd_remove),
1280}; 1532};
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 47b19c0bb070..d47586cf64ce 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1066,15 +1066,17 @@ EXPORT_SYMBOL(nand_lock);
1066 * @mtd: mtd info structure 1066 * @mtd: mtd info structure
1067 * @chip: nand chip info structure 1067 * @chip: nand chip info structure
1068 * @buf: buffer to store read data 1068 * @buf: buffer to store read data
1069 * @oob_required: caller requires OOB data read to chip->oob_poi
1069 * @page: page number to read 1070 * @page: page number to read
1070 * 1071 *
1071 * Not for syndrome calculating ECC controllers, which use a special oob layout. 1072 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1072 */ 1073 */
1073static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1074static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1074 uint8_t *buf, int page) 1075 uint8_t *buf, int oob_required, int page)
1075{ 1076{
1076 chip->read_buf(mtd, buf, mtd->writesize); 1077 chip->read_buf(mtd, buf, mtd->writesize);
1077 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1078 if (oob_required)
1079 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1078 return 0; 1080 return 0;
1079} 1081}
1080 1082
@@ -1083,13 +1085,14 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1083 * @mtd: mtd info structure 1085 * @mtd: mtd info structure
1084 * @chip: nand chip info structure 1086 * @chip: nand chip info structure
1085 * @buf: buffer to store read data 1087 * @buf: buffer to store read data
1088 * @oob_required: caller requires OOB data read to chip->oob_poi
1086 * @page: page number to read 1089 * @page: page number to read
1087 * 1090 *
1088 * We need a special oob layout and handling even when OOB isn't used. 1091 * We need a special oob layout and handling even when OOB isn't used.
1089 */ 1092 */
1090static int nand_read_page_raw_syndrome(struct mtd_info *mtd, 1093static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1091 struct nand_chip *chip, 1094 struct nand_chip *chip, uint8_t *buf,
1092 uint8_t *buf, int page) 1095 int oob_required, int page)
1093{ 1096{
1094 int eccsize = chip->ecc.size; 1097 int eccsize = chip->ecc.size;
1095 int eccbytes = chip->ecc.bytes; 1098 int eccbytes = chip->ecc.bytes;
@@ -1126,10 +1129,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1126 * @mtd: mtd info structure 1129 * @mtd: mtd info structure
1127 * @chip: nand chip info structure 1130 * @chip: nand chip info structure
1128 * @buf: buffer to store read data 1131 * @buf: buffer to store read data
1132 * @oob_required: caller requires OOB data read to chip->oob_poi
1129 * @page: page number to read 1133 * @page: page number to read
1130 */ 1134 */
1131static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1135static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1132 uint8_t *buf, int page) 1136 uint8_t *buf, int oob_required, int page)
1133{ 1137{
1134 int i, eccsize = chip->ecc.size; 1138 int i, eccsize = chip->ecc.size;
1135 int eccbytes = chip->ecc.bytes; 1139 int eccbytes = chip->ecc.bytes;
@@ -1138,8 +1142,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1138 uint8_t *ecc_calc = chip->buffers->ecccalc; 1142 uint8_t *ecc_calc = chip->buffers->ecccalc;
1139 uint8_t *ecc_code = chip->buffers->ecccode; 1143 uint8_t *ecc_code = chip->buffers->ecccode;
1140 uint32_t *eccpos = chip->ecc.layout->eccpos; 1144 uint32_t *eccpos = chip->ecc.layout->eccpos;
1145 unsigned int max_bitflips = 0;
1141 1146
1142 chip->ecc.read_page_raw(mtd, chip, buf, page); 1147 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1143 1148
1144 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 1149 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1145 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1150 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1154,12 +1159,14 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1154 int stat; 1159 int stat;
1155 1160
1156 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 1161 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1157 if (stat < 0) 1162 if (stat < 0) {
1158 mtd->ecc_stats.failed++; 1163 mtd->ecc_stats.failed++;
1159 else 1164 } else {
1160 mtd->ecc_stats.corrected += stat; 1165 mtd->ecc_stats.corrected += stat;
1166 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1167 }
1161 } 1168 }
1162 return 0; 1169 return max_bitflips;
1163} 1170}
1164 1171
1165/** 1172/**
@@ -1180,6 +1187,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1180 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 1187 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1181 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 1188 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1182 int index = 0; 1189 int index = 0;
1190 unsigned int max_bitflips = 0;
1183 1191
1184 /* Column address within the page aligned to ECC size (256bytes) */ 1192 /* Column address within the page aligned to ECC size (256bytes) */
1185 start_step = data_offs / chip->ecc.size; 1193 start_step = data_offs / chip->ecc.size;
@@ -1244,12 +1252,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1244 1252
1245 stat = chip->ecc.correct(mtd, p, 1253 stat = chip->ecc.correct(mtd, p,
1246 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]); 1254 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1247 if (stat < 0) 1255 if (stat < 0) {
1248 mtd->ecc_stats.failed++; 1256 mtd->ecc_stats.failed++;
1249 else 1257 } else {
1250 mtd->ecc_stats.corrected += stat; 1258 mtd->ecc_stats.corrected += stat;
1259 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1260 }
1251 } 1261 }
1252 return 0; 1262 return max_bitflips;
1253} 1263}
1254 1264
1255/** 1265/**
@@ -1257,12 +1267,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1257 * @mtd: mtd info structure 1267 * @mtd: mtd info structure
1258 * @chip: nand chip info structure 1268 * @chip: nand chip info structure
1259 * @buf: buffer to store read data 1269 * @buf: buffer to store read data
1270 * @oob_required: caller requires OOB data read to chip->oob_poi
1260 * @page: page number to read 1271 * @page: page number to read
1261 * 1272 *
1262 * Not for syndrome calculating ECC controllers which need a special oob layout. 1273 * Not for syndrome calculating ECC controllers which need a special oob layout.
1263 */ 1274 */
1264static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1275static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1265 uint8_t *buf, int page) 1276 uint8_t *buf, int oob_required, int page)
1266{ 1277{
1267 int i, eccsize = chip->ecc.size; 1278 int i, eccsize = chip->ecc.size;
1268 int eccbytes = chip->ecc.bytes; 1279 int eccbytes = chip->ecc.bytes;
@@ -1271,6 +1282,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1271 uint8_t *ecc_calc = chip->buffers->ecccalc; 1282 uint8_t *ecc_calc = chip->buffers->ecccalc;
1272 uint8_t *ecc_code = chip->buffers->ecccode; 1283 uint8_t *ecc_code = chip->buffers->ecccode;
1273 uint32_t *eccpos = chip->ecc.layout->eccpos; 1284 uint32_t *eccpos = chip->ecc.layout->eccpos;
1285 unsigned int max_bitflips = 0;
1274 1286
1275 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 1287 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1276 chip->ecc.hwctl(mtd, NAND_ECC_READ); 1288 chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1289,12 +1301,14 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1289 int stat; 1301 int stat;
1290 1302
1291 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 1303 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1292 if (stat < 0) 1304 if (stat < 0) {
1293 mtd->ecc_stats.failed++; 1305 mtd->ecc_stats.failed++;
1294 else 1306 } else {
1295 mtd->ecc_stats.corrected += stat; 1307 mtd->ecc_stats.corrected += stat;
1308 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1309 }
1296 } 1310 }
1297 return 0; 1311 return max_bitflips;
1298} 1312}
1299 1313
1300/** 1314/**
@@ -1302,6 +1316,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1302 * @mtd: mtd info structure 1316 * @mtd: mtd info structure
1303 * @chip: nand chip info structure 1317 * @chip: nand chip info structure
1304 * @buf: buffer to store read data 1318 * @buf: buffer to store read data
1319 * @oob_required: caller requires OOB data read to chip->oob_poi
1305 * @page: page number to read 1320 * @page: page number to read
1306 * 1321 *
1307 * Hardware ECC for large page chips, require OOB to be read first. For this 1322 * Hardware ECC for large page chips, require OOB to be read first. For this
@@ -1311,7 +1326,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1311 * the data area, by overwriting the NAND manufacturer bad block markings. 1326 * the data area, by overwriting the NAND manufacturer bad block markings.
1312 */ 1327 */
1313static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, 1328static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1314 struct nand_chip *chip, uint8_t *buf, int page) 1329 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1315{ 1330{
1316 int i, eccsize = chip->ecc.size; 1331 int i, eccsize = chip->ecc.size;
1317 int eccbytes = chip->ecc.bytes; 1332 int eccbytes = chip->ecc.bytes;
@@ -1320,6 +1335,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1320 uint8_t *ecc_code = chip->buffers->ecccode; 1335 uint8_t *ecc_code = chip->buffers->ecccode;
1321 uint32_t *eccpos = chip->ecc.layout->eccpos; 1336 uint32_t *eccpos = chip->ecc.layout->eccpos;
1322 uint8_t *ecc_calc = chip->buffers->ecccalc; 1337 uint8_t *ecc_calc = chip->buffers->ecccalc;
1338 unsigned int max_bitflips = 0;
1323 1339
1324 /* Read the OOB area first */ 1340 /* Read the OOB area first */
1325 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 1341 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1337,12 +1353,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1337 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1353 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1338 1354
1339 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); 1355 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1340 if (stat < 0) 1356 if (stat < 0) {
1341 mtd->ecc_stats.failed++; 1357 mtd->ecc_stats.failed++;
1342 else 1358 } else {
1343 mtd->ecc_stats.corrected += stat; 1359 mtd->ecc_stats.corrected += stat;
1360 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1361 }
1344 } 1362 }
1345 return 0; 1363 return max_bitflips;
1346} 1364}
1347 1365
1348/** 1366/**
@@ -1350,19 +1368,21 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1350 * @mtd: mtd info structure 1368 * @mtd: mtd info structure
1351 * @chip: nand chip info structure 1369 * @chip: nand chip info structure
1352 * @buf: buffer to store read data 1370 * @buf: buffer to store read data
1371 * @oob_required: caller requires OOB data read to chip->oob_poi
1353 * @page: page number to read 1372 * @page: page number to read
1354 * 1373 *
1355 * The hw generator calculates the error syndrome automatically. Therefore we 1374 * The hw generator calculates the error syndrome automatically. Therefore we
1356 * need a special oob layout and handling. 1375 * need a special oob layout and handling.
1357 */ 1376 */
1358static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1377static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1359 uint8_t *buf, int page) 1378 uint8_t *buf, int oob_required, int page)
1360{ 1379{
1361 int i, eccsize = chip->ecc.size; 1380 int i, eccsize = chip->ecc.size;
1362 int eccbytes = chip->ecc.bytes; 1381 int eccbytes = chip->ecc.bytes;
1363 int eccsteps = chip->ecc.steps; 1382 int eccsteps = chip->ecc.steps;
1364 uint8_t *p = buf; 1383 uint8_t *p = buf;
1365 uint8_t *oob = chip->oob_poi; 1384 uint8_t *oob = chip->oob_poi;
1385 unsigned int max_bitflips = 0;
1366 1386
1367 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 1387 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1368 int stat; 1388 int stat;
@@ -1379,10 +1399,12 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1379 chip->read_buf(mtd, oob, eccbytes); 1399 chip->read_buf(mtd, oob, eccbytes);
1380 stat = chip->ecc.correct(mtd, p, oob, NULL); 1400 stat = chip->ecc.correct(mtd, p, oob, NULL);
1381 1401
1382 if (stat < 0) 1402 if (stat < 0) {
1383 mtd->ecc_stats.failed++; 1403 mtd->ecc_stats.failed++;
1384 else 1404 } else {
1385 mtd->ecc_stats.corrected += stat; 1405 mtd->ecc_stats.corrected += stat;
1406 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1407 }
1386 1408
1387 oob += eccbytes; 1409 oob += eccbytes;
1388 1410
@@ -1397,7 +1419,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1397 if (i) 1419 if (i)
1398 chip->read_buf(mtd, oob, i); 1420 chip->read_buf(mtd, oob, i);
1399 1421
1400 return 0; 1422 return max_bitflips;
1401} 1423}
1402 1424
1403/** 1425/**
@@ -1459,11 +1481,9 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1459static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, 1481static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1460 struct mtd_oob_ops *ops) 1482 struct mtd_oob_ops *ops)
1461{ 1483{
1462 int chipnr, page, realpage, col, bytes, aligned; 1484 int chipnr, page, realpage, col, bytes, aligned, oob_required;
1463 struct nand_chip *chip = mtd->priv; 1485 struct nand_chip *chip = mtd->priv;
1464 struct mtd_ecc_stats stats; 1486 struct mtd_ecc_stats stats;
1465 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1466 int sndcmd = 1;
1467 int ret = 0; 1487 int ret = 0;
1468 uint32_t readlen = ops->len; 1488 uint32_t readlen = ops->len;
1469 uint32_t oobreadlen = ops->ooblen; 1489 uint32_t oobreadlen = ops->ooblen;
@@ -1471,6 +1491,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1471 mtd->oobavail : mtd->oobsize; 1491 mtd->oobavail : mtd->oobsize;
1472 1492
1473 uint8_t *bufpoi, *oob, *buf; 1493 uint8_t *bufpoi, *oob, *buf;
1494 unsigned int max_bitflips = 0;
1474 1495
1475 stats = mtd->ecc_stats; 1496 stats = mtd->ecc_stats;
1476 1497
@@ -1484,6 +1505,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1484 1505
1485 buf = ops->datbuf; 1506 buf = ops->datbuf;
1486 oob = ops->oobbuf; 1507 oob = ops->oobbuf;
1508 oob_required = oob ? 1 : 0;
1487 1509
1488 while (1) { 1510 while (1) {
1489 bytes = min(mtd->writesize - col, readlen); 1511 bytes = min(mtd->writesize - col, readlen);
@@ -1493,21 +1515,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1493 if (realpage != chip->pagebuf || oob) { 1515 if (realpage != chip->pagebuf || oob) {
1494 bufpoi = aligned ? buf : chip->buffers->databuf; 1516 bufpoi = aligned ? buf : chip->buffers->databuf;
1495 1517
1496 if (likely(sndcmd)) { 1518 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1497 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1498 sndcmd = 0;
1499 }
1500 1519
1501 /* Now read the page into the buffer */ 1520 /*
1521 * Now read the page into the buffer. Absent an error,
1522 * the read methods return max bitflips per ecc step.
1523 */
1502 if (unlikely(ops->mode == MTD_OPS_RAW)) 1524 if (unlikely(ops->mode == MTD_OPS_RAW))
1503 ret = chip->ecc.read_page_raw(mtd, chip, 1525 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1504 bufpoi, page); 1526 oob_required,
1527 page);
1505 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) 1528 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
1506 ret = chip->ecc.read_subpage(mtd, chip, 1529 ret = chip->ecc.read_subpage(mtd, chip,
1507 col, bytes, bufpoi); 1530 col, bytes, bufpoi);
1508 else 1531 else
1509 ret = chip->ecc.read_page(mtd, chip, bufpoi, 1532 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1510 page); 1533 oob_required, page);
1511 if (ret < 0) { 1534 if (ret < 0) {
1512 if (!aligned) 1535 if (!aligned)
1513 /* Invalidate page cache */ 1536 /* Invalidate page cache */
@@ -1515,22 +1538,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1515 break; 1538 break;
1516 } 1539 }
1517 1540
1541 max_bitflips = max_t(unsigned int, max_bitflips, ret);
1542
1518 /* Transfer not aligned data */ 1543 /* Transfer not aligned data */
1519 if (!aligned) { 1544 if (!aligned) {
1520 if (!NAND_SUBPAGE_READ(chip) && !oob && 1545 if (!NAND_SUBPAGE_READ(chip) && !oob &&
1521 !(mtd->ecc_stats.failed - stats.failed) && 1546 !(mtd->ecc_stats.failed - stats.failed) &&
1522 (ops->mode != MTD_OPS_RAW)) 1547 (ops->mode != MTD_OPS_RAW)) {
1523 chip->pagebuf = realpage; 1548 chip->pagebuf = realpage;
1524 else 1549 chip->pagebuf_bitflips = ret;
1550 } else {
1525 /* Invalidate page cache */ 1551 /* Invalidate page cache */
1526 chip->pagebuf = -1; 1552 chip->pagebuf = -1;
1553 }
1527 memcpy(buf, chip->buffers->databuf + col, bytes); 1554 memcpy(buf, chip->buffers->databuf + col, bytes);
1528 } 1555 }
1529 1556
1530 buf += bytes; 1557 buf += bytes;
1531 1558
1532 if (unlikely(oob)) { 1559 if (unlikely(oob)) {
1533
1534 int toread = min(oobreadlen, max_oobsize); 1560 int toread = min(oobreadlen, max_oobsize);
1535 1561
1536 if (toread) { 1562 if (toread) {
@@ -1541,13 +1567,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1541 } 1567 }
1542 1568
1543 if (!(chip->options & NAND_NO_READRDY)) { 1569 if (!(chip->options & NAND_NO_READRDY)) {
1544 /* 1570 /* Apply delay or wait for ready/busy pin */
1545 * Apply delay or wait for ready/busy pin. Do
1546 * this before the AUTOINCR check, so no
1547 * problems arise if a chip which does auto
1548 * increment is marked as NOAUTOINCR by the
1549 * board driver.
1550 */
1551 if (!chip->dev_ready) 1571 if (!chip->dev_ready)
1552 udelay(chip->chip_delay); 1572 udelay(chip->chip_delay);
1553 else 1573 else
@@ -1556,6 +1576,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1556 } else { 1576 } else {
1557 memcpy(buf, chip->buffers->databuf + col, bytes); 1577 memcpy(buf, chip->buffers->databuf + col, bytes);
1558 buf += bytes; 1578 buf += bytes;
1579 max_bitflips = max_t(unsigned int, max_bitflips,
1580 chip->pagebuf_bitflips);
1559 } 1581 }
1560 1582
1561 readlen -= bytes; 1583 readlen -= bytes;
@@ -1575,26 +1597,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1575 chip->select_chip(mtd, -1); 1597 chip->select_chip(mtd, -1);
1576 chip->select_chip(mtd, chipnr); 1598 chip->select_chip(mtd, chipnr);
1577 } 1599 }
1578
1579 /*
1580 * Check, if the chip supports auto page increment or if we
1581 * have hit a block boundary.
1582 */
1583 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1584 sndcmd = 1;
1585 } 1600 }
1586 1601
1587 ops->retlen = ops->len - (size_t) readlen; 1602 ops->retlen = ops->len - (size_t) readlen;
1588 if (oob) 1603 if (oob)
1589 ops->oobretlen = ops->ooblen - oobreadlen; 1604 ops->oobretlen = ops->ooblen - oobreadlen;
1590 1605
1591 if (ret) 1606 if (ret < 0)
1592 return ret; 1607 return ret;
1593 1608
1594 if (mtd->ecc_stats.failed - stats.failed) 1609 if (mtd->ecc_stats.failed - stats.failed)
1595 return -EBADMSG; 1610 return -EBADMSG;
1596 1611
1597 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 1612 return max_bitflips;
1598} 1613}
1599 1614
1600/** 1615/**
@@ -1630,17 +1645,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1630 * @mtd: mtd info structure 1645 * @mtd: mtd info structure
1631 * @chip: nand chip info structure 1646 * @chip: nand chip info structure
1632 * @page: page number to read 1647 * @page: page number to read
1633 * @sndcmd: flag whether to issue read command or not
1634 */ 1648 */
1635static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1649static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1636 int page, int sndcmd) 1650 int page)
1637{ 1651{
1638 if (sndcmd) { 1652 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1639 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1640 sndcmd = 0;
1641 }
1642 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1653 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1643 return sndcmd; 1654 return 0;
1644} 1655}
1645 1656
1646/** 1657/**
@@ -1649,10 +1660,9 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1649 * @mtd: mtd info structure 1660 * @mtd: mtd info structure
1650 * @chip: nand chip info structure 1661 * @chip: nand chip info structure
1651 * @page: page number to read 1662 * @page: page number to read
1652 * @sndcmd: flag whether to issue read command or not
1653 */ 1663 */
1654static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1664static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1655 int page, int sndcmd) 1665 int page)
1656{ 1666{
1657 uint8_t *buf = chip->oob_poi; 1667 uint8_t *buf = chip->oob_poi;
1658 int length = mtd->oobsize; 1668 int length = mtd->oobsize;
@@ -1679,7 +1689,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1679 if (length > 0) 1689 if (length > 0)
1680 chip->read_buf(mtd, bufpoi, length); 1690 chip->read_buf(mtd, bufpoi, length);
1681 1691
1682 return 1; 1692 return 0;
1683} 1693}
1684 1694
1685/** 1695/**
@@ -1775,13 +1785,13 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
1775static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, 1785static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1776 struct mtd_oob_ops *ops) 1786 struct mtd_oob_ops *ops)
1777{ 1787{
1778 int page, realpage, chipnr, sndcmd = 1; 1788 int page, realpage, chipnr;
1779 struct nand_chip *chip = mtd->priv; 1789 struct nand_chip *chip = mtd->priv;
1780 struct mtd_ecc_stats stats; 1790 struct mtd_ecc_stats stats;
1781 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1782 int readlen = ops->ooblen; 1791 int readlen = ops->ooblen;
1783 int len; 1792 int len;
1784 uint8_t *buf = ops->oobbuf; 1793 uint8_t *buf = ops->oobbuf;
1794 int ret = 0;
1785 1795
1786 pr_debug("%s: from = 0x%08Lx, len = %i\n", 1796 pr_debug("%s: from = 0x%08Lx, len = %i\n",
1787 __func__, (unsigned long long)from, readlen); 1797 __func__, (unsigned long long)from, readlen);
@@ -1817,20 +1827,18 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1817 1827
1818 while (1) { 1828 while (1) {
1819 if (ops->mode == MTD_OPS_RAW) 1829 if (ops->mode == MTD_OPS_RAW)
1820 sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd); 1830 ret = chip->ecc.read_oob_raw(mtd, chip, page);
1821 else 1831 else
1822 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); 1832 ret = chip->ecc.read_oob(mtd, chip, page);
1833
1834 if (ret < 0)
1835 break;
1823 1836
1824 len = min(len, readlen); 1837 len = min(len, readlen);
1825 buf = nand_transfer_oob(chip, buf, ops, len); 1838 buf = nand_transfer_oob(chip, buf, ops, len);
1826 1839
1827 if (!(chip->options & NAND_NO_READRDY)) { 1840 if (!(chip->options & NAND_NO_READRDY)) {
1828 /* 1841 /* Apply delay or wait for ready/busy pin */
1829 * Apply delay or wait for ready/busy pin. Do this
1830 * before the AUTOINCR check, so no problems arise if a
1831 * chip which does auto increment is marked as
1832 * NOAUTOINCR by the board driver.
1833 */
1834 if (!chip->dev_ready) 1842 if (!chip->dev_ready)
1835 udelay(chip->chip_delay); 1843 udelay(chip->chip_delay);
1836 else 1844 else
@@ -1851,16 +1859,12 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1851 chip->select_chip(mtd, -1); 1859 chip->select_chip(mtd, -1);
1852 chip->select_chip(mtd, chipnr); 1860 chip->select_chip(mtd, chipnr);
1853 } 1861 }
1854
1855 /*
1856 * Check, if the chip supports auto page increment or if we
1857 * have hit a block boundary.
1858 */
1859 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1860 sndcmd = 1;
1861 } 1862 }
1862 1863
1863 ops->oobretlen = ops->ooblen; 1864 ops->oobretlen = ops->ooblen - readlen;
1865
1866 if (ret < 0)
1867 return ret;
1864 1868
1865 if (mtd->ecc_stats.failed - stats.failed) 1869 if (mtd->ecc_stats.failed - stats.failed)
1866 return -EBADMSG; 1870 return -EBADMSG;
@@ -1919,14 +1923,16 @@ out:
1919 * @mtd: mtd info structure 1923 * @mtd: mtd info structure
1920 * @chip: nand chip info structure 1924 * @chip: nand chip info structure
1921 * @buf: data buffer 1925 * @buf: data buffer
1926 * @oob_required: must write chip->oob_poi to OOB
1922 * 1927 *
1923 * Not for syndrome calculating ECC controllers, which use a special oob layout. 1928 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1924 */ 1929 */
1925static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1930static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1926 const uint8_t *buf) 1931 const uint8_t *buf, int oob_required)
1927{ 1932{
1928 chip->write_buf(mtd, buf, mtd->writesize); 1933 chip->write_buf(mtd, buf, mtd->writesize);
1929 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1934 if (oob_required)
1935 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1930} 1936}
1931 1937
1932/** 1938/**
@@ -1934,12 +1940,13 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1934 * @mtd: mtd info structure 1940 * @mtd: mtd info structure
1935 * @chip: nand chip info structure 1941 * @chip: nand chip info structure
1936 * @buf: data buffer 1942 * @buf: data buffer
1943 * @oob_required: must write chip->oob_poi to OOB
1937 * 1944 *
1938 * We need a special oob layout and handling even when ECC isn't checked. 1945 * We need a special oob layout and handling even when ECC isn't checked.
1939 */ 1946 */
1940static void nand_write_page_raw_syndrome(struct mtd_info *mtd, 1947static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1941 struct nand_chip *chip, 1948 struct nand_chip *chip,
1942 const uint8_t *buf) 1949 const uint8_t *buf, int oob_required)
1943{ 1950{
1944 int eccsize = chip->ecc.size; 1951 int eccsize = chip->ecc.size;
1945 int eccbytes = chip->ecc.bytes; 1952 int eccbytes = chip->ecc.bytes;
@@ -1973,9 +1980,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1973 * @mtd: mtd info structure 1980 * @mtd: mtd info structure
1974 * @chip: nand chip info structure 1981 * @chip: nand chip info structure
1975 * @buf: data buffer 1982 * @buf: data buffer
1983 * @oob_required: must write chip->oob_poi to OOB
1976 */ 1984 */
1977static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1985static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1978 const uint8_t *buf) 1986 const uint8_t *buf, int oob_required)
1979{ 1987{
1980 int i, eccsize = chip->ecc.size; 1988 int i, eccsize = chip->ecc.size;
1981 int eccbytes = chip->ecc.bytes; 1989 int eccbytes = chip->ecc.bytes;
@@ -1991,7 +1999,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1991 for (i = 0; i < chip->ecc.total; i++) 1999 for (i = 0; i < chip->ecc.total; i++)
1992 chip->oob_poi[eccpos[i]] = ecc_calc[i]; 2000 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1993 2001
1994 chip->ecc.write_page_raw(mtd, chip, buf); 2002 chip->ecc.write_page_raw(mtd, chip, buf, 1);
1995} 2003}
1996 2004
1997/** 2005/**
@@ -1999,9 +2007,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1999 * @mtd: mtd info structure 2007 * @mtd: mtd info structure
2000 * @chip: nand chip info structure 2008 * @chip: nand chip info structure
2001 * @buf: data buffer 2009 * @buf: data buffer
2010 * @oob_required: must write chip->oob_poi to OOB
2002 */ 2011 */
2003static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 2012static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2004 const uint8_t *buf) 2013 const uint8_t *buf, int oob_required)
2005{ 2014{
2006 int i, eccsize = chip->ecc.size; 2015 int i, eccsize = chip->ecc.size;
2007 int eccbytes = chip->ecc.bytes; 2016 int eccbytes = chip->ecc.bytes;
@@ -2027,12 +2036,14 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2027 * @mtd: mtd info structure 2036 * @mtd: mtd info structure
2028 * @chip: nand chip info structure 2037 * @chip: nand chip info structure
2029 * @buf: data buffer 2038 * @buf: data buffer
2039 * @oob_required: must write chip->oob_poi to OOB
2030 * 2040 *
2031 * The hw generator calculates the error syndrome automatically. Therefore we 2041 * The hw generator calculates the error syndrome automatically. Therefore we
2032 * need a special oob layout and handling. 2042 * need a special oob layout and handling.
2033 */ 2043 */
2034static void nand_write_page_syndrome(struct mtd_info *mtd, 2044static void nand_write_page_syndrome(struct mtd_info *mtd,
2035 struct nand_chip *chip, const uint8_t *buf) 2045 struct nand_chip *chip,
2046 const uint8_t *buf, int oob_required)
2036{ 2047{
2037 int i, eccsize = chip->ecc.size; 2048 int i, eccsize = chip->ecc.size;
2038 int eccbytes = chip->ecc.bytes; 2049 int eccbytes = chip->ecc.bytes;
@@ -2071,21 +2082,23 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
2071 * @mtd: MTD device structure 2082 * @mtd: MTD device structure
2072 * @chip: NAND chip descriptor 2083 * @chip: NAND chip descriptor
2073 * @buf: the data to write 2084 * @buf: the data to write
2085 * @oob_required: must write chip->oob_poi to OOB
2074 * @page: page number to write 2086 * @page: page number to write
2075 * @cached: cached programming 2087 * @cached: cached programming
2076 * @raw: use _raw version of write_page 2088 * @raw: use _raw version of write_page
2077 */ 2089 */
2078static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 2090static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2079 const uint8_t *buf, int page, int cached, int raw) 2091 const uint8_t *buf, int oob_required, int page,
2092 int cached, int raw)
2080{ 2093{
2081 int status; 2094 int status;
2082 2095
2083 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 2096 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2084 2097
2085 if (unlikely(raw)) 2098 if (unlikely(raw))
2086 chip->ecc.write_page_raw(mtd, chip, buf); 2099 chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
2087 else 2100 else
2088 chip->ecc.write_page(mtd, chip, buf); 2101 chip->ecc.write_page(mtd, chip, buf, oob_required);
2089 2102
2090 /* 2103 /*
2091 * Cached progamming disabled for now. Not sure if it's worth the 2104 * Cached progamming disabled for now. Not sure if it's worth the
@@ -2118,6 +2131,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2118 2131
2119 if (chip->verify_buf(mtd, buf, mtd->writesize)) 2132 if (chip->verify_buf(mtd, buf, mtd->writesize))
2120 return -EIO; 2133 return -EIO;
2134
2135 /* Make sure the next page prog is preceded by a status read */
2136 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
2121#endif 2137#endif
2122 return 0; 2138 return 0;
2123} 2139}
@@ -2202,6 +2218,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2202 uint8_t *oob = ops->oobbuf; 2218 uint8_t *oob = ops->oobbuf;
2203 uint8_t *buf = ops->datbuf; 2219 uint8_t *buf = ops->datbuf;
2204 int ret, subpage; 2220 int ret, subpage;
2221 int oob_required = oob ? 1 : 0;
2205 2222
2206 ops->retlen = 0; 2223 ops->retlen = 0;
2207 if (!writelen) 2224 if (!writelen)
@@ -2264,8 +2281,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2264 memset(chip->oob_poi, 0xff, mtd->oobsize); 2281 memset(chip->oob_poi, 0xff, mtd->oobsize);
2265 } 2282 }
2266 2283
2267 ret = chip->write_page(mtd, chip, wbuf, page, cached, 2284 ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
2268 (ops->mode == MTD_OPS_RAW)); 2285 cached, (ops->mode == MTD_OPS_RAW));
2269 if (ret) 2286 if (ret)
2270 break; 2287 break;
2271 2288
@@ -2898,8 +2915,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2898 *busw = NAND_BUSWIDTH_16; 2915 *busw = NAND_BUSWIDTH_16;
2899 2916
2900 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2917 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2901 chip->options |= (NAND_NO_READRDY | 2918 chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
2902 NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
2903 2919
2904 pr_info("ONFI flash detected\n"); 2920 pr_info("ONFI flash detected\n");
2905 return 1; 2921 return 1;
@@ -3076,11 +3092,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3076 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; 3092 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
3077ident_done: 3093ident_done:
3078 3094
3079 /*
3080 * Set chip as a default. Board drivers can override it, if necessary.
3081 */
3082 chip->options |= NAND_NO_AUTOINCR;
3083
3084 /* Try to identify manufacturer */ 3095 /* Try to identify manufacturer */
3085 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { 3096 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
3086 if (nand_manuf_ids[maf_idx].id == *maf_id) 3097 if (nand_manuf_ids[maf_idx].id == *maf_id)
@@ -3154,10 +3165,11 @@ ident_done:
3154 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 3165 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3155 chip->cmdfunc = nand_command_lp; 3166 chip->cmdfunc = nand_command_lp;
3156 3167
3157 pr_info("NAND device: Manufacturer ID:" 3168 pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
3158 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, 3169 " page size: %d, OOB size: %d\n",
3159 nand_manuf_ids[maf_idx].name, 3170 *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
3160 chip->onfi_version ? chip->onfi_params.model : type->name); 3171 chip->onfi_version ? chip->onfi_params.model : type->name,
3172 mtd->writesize, mtd->oobsize);
3161 3173
3162 return type; 3174 return type;
3163} 3175}
@@ -3329,8 +3341,13 @@ int nand_scan_tail(struct mtd_info *mtd)
3329 if (!chip->ecc.write_oob) 3341 if (!chip->ecc.write_oob)
3330 chip->ecc.write_oob = nand_write_oob_syndrome; 3342 chip->ecc.write_oob = nand_write_oob_syndrome;
3331 3343
3332 if (mtd->writesize >= chip->ecc.size) 3344 if (mtd->writesize >= chip->ecc.size) {
3345 if (!chip->ecc.strength) {
3346 pr_warn("Driver must set ecc.strength when using hardware ECC\n");
3347 BUG();
3348 }
3333 break; 3349 break;
3350 }
3334 pr_warn("%d byte HW ECC not possible on " 3351 pr_warn("%d byte HW ECC not possible on "
3335 "%d byte page size, fallback to SW ECC\n", 3352 "%d byte page size, fallback to SW ECC\n",
3336 chip->ecc.size, mtd->writesize); 3353 chip->ecc.size, mtd->writesize);
@@ -3385,7 +3402,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3385 BUG(); 3402 BUG();
3386 } 3403 }
3387 chip->ecc.strength = 3404 chip->ecc.strength =
3388 chip->ecc.bytes*8 / fls(8*chip->ecc.size); 3405 chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
3389 break; 3406 break;
3390 3407
3391 case NAND_ECC_NONE: 3408 case NAND_ECC_NONE:
@@ -3483,7 +3500,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3483 3500
3484 /* propagate ecc info to mtd_info */ 3501 /* propagate ecc info to mtd_info */
3485 mtd->ecclayout = chip->ecc.layout; 3502 mtd->ecclayout = chip->ecc.layout;
3486 mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps; 3503 mtd->ecc_strength = chip->ecc.strength;
3487 3504
3488 /* Check, if we should skip the bad block table scan */ 3505 /* Check, if we should skip the bad block table scan */
3489 if (chip->options & NAND_SKIP_BBTSCAN) 3506 if (chip->options & NAND_SKIP_BBTSCAN)
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 20a112f591fe..30d1319ff065 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
324 324
325 buf += mtd->oobsize + mtd->writesize; 325 buf += mtd->oobsize + mtd->writesize;
326 len -= mtd->writesize; 326 len -= mtd->writesize;
327 offs += mtd->writesize;
327 } 328 }
328 return 0; 329 return 0;
329} 330}
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index af4fe8ca7b5e..621b70b7a159 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = {
70 * These are the new chips with large page size. The pagesize and the 70 * These are the new chips with large page size. The pagesize and the
71 * erasesize is determined from the extended id bytes 71 * erasesize is determined from the extended id bytes
72 */ 72 */
73#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR) 73#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
74#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) 74#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
75 75
76 /* 512 Megabit */ 76 /* 512 Megabit */
@@ -157,9 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = {
157 * writes possible, but not implemented now 157 * writes possible, but not implemented now
158 */ 158 */
159 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, 159 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
160 NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY | 160 NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
161 BBT_AUTO_REFRESH
162 },
163 161
164 {NULL,} 162 {NULL,}
165}; 163};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 261f478f8cc3..6cc8fbfabb8e 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -268,7 +268,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
268#define OPT_PAGE512 0x00000002 /* 512-byte page chips */ 268#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
269#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ 269#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
270#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ 270#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
271#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
272#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ 271#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
273#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ 272#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
274#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ 273#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -594,7 +593,7 @@ static int init_nandsim(struct mtd_info *mtd)
594 ns->options |= OPT_PAGE256; 593 ns->options |= OPT_PAGE256;
595 } 594 }
596 else if (ns->geom.pgsz == 512) { 595 else if (ns->geom.pgsz == 512) {
597 ns->options |= (OPT_PAGE512 | OPT_AUTOINCR); 596 ns->options |= OPT_PAGE512;
598 if (ns->busw == 8) 597 if (ns->busw == 8)
599 ns->options |= OPT_PAGE512_8BIT; 598 ns->options |= OPT_PAGE512_8BIT;
600 } else if (ns->geom.pgsz == 2048) { 599 } else if (ns->geom.pgsz == 2048) {
@@ -663,8 +662,6 @@ static int init_nandsim(struct mtd_info *mtd)
663 for (i = 0; nand_flash_ids[i].name != NULL; i++) { 662 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
664 if (second_id_byte != nand_flash_ids[i].id) 663 if (second_id_byte != nand_flash_ids[i].id)
665 continue; 664 continue;
666 if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
667 ns->options |= OPT_AUTOINCR;
668 } 665 }
669 666
670 if (ns->busw == 16) 667 if (ns->busw == 16)
@@ -1936,20 +1933,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
1936 if (ns->regs.count == ns->regs.num) { 1933 if (ns->regs.count == ns->regs.num) {
1937 NS_DBG("read_byte: all bytes were read\n"); 1934 NS_DBG("read_byte: all bytes were read\n");
1938 1935
1939 /* 1936 if (NS_STATE(ns->nxstate) == STATE_READY)
1940 * The OPT_AUTOINCR allows to read next consecutive pages without
1941 * new read operation cycle.
1942 */
1943 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
1944 ns->regs.count = 0;
1945 if (ns->regs.row + 1 < ns->geom.pgnum)
1946 ns->regs.row += 1;
1947 NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
1948 do_state_action(ns, ACTION_CPY);
1949 }
1950 else if (NS_STATE(ns->nxstate) == STATE_READY)
1951 switch_state(ns); 1937 switch_state(ns);
1952
1953 } 1938 }
1954 1939
1955 return outb; 1940 return outb;
@@ -2203,14 +2188,7 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
2203 ns->regs.count += len; 2188 ns->regs.count += len;
2204 2189
2205 if (ns->regs.count == ns->regs.num) { 2190 if (ns->regs.count == ns->regs.num) {
2206 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) { 2191 if (NS_STATE(ns->nxstate) == STATE_READY)
2207 ns->regs.count = 0;
2208 if (ns->regs.row + 1 < ns->geom.pgnum)
2209 ns->regs.row += 1;
2210 NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
2211 do_state_action(ns, ACTION_CPY);
2212 }
2213 else if (NS_STATE(ns->nxstate) == STATE_READY)
2214 switch_state(ns); 2192 switch_state(ns);
2215 } 2193 }
2216 2194
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c2b0bba9d8b3..d7f681d0c9b9 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -21,6 +21,10 @@
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24#ifdef CONFIG_MTD_NAND_OMAP_BCH
25#include <linux/bch.h>
26#endif
27
24#include <plat/dma.h> 28#include <plat/dma.h>
25#include <plat/gpmc.h> 29#include <plat/gpmc.h>
26#include <plat/nand.h> 30#include <plat/nand.h>
@@ -127,6 +131,11 @@ struct omap_nand_info {
127 } iomode; 131 } iomode;
128 u_char *buf; 132 u_char *buf;
129 int buf_len; 133 int buf_len;
134
135#ifdef CONFIG_MTD_NAND_OMAP_BCH
136 struct bch_control *bch;
137 struct nand_ecclayout ecclayout;
138#endif
130}; 139};
131 140
132/** 141/**
@@ -402,7 +411,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
402 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); 411 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
403 if (ret) 412 if (ret)
404 /* PFPW engine is busy, use cpu copy method */ 413 /* PFPW engine is busy, use cpu copy method */
405 goto out_copy; 414 goto out_copy_unmap;
406 415
407 init_completion(&info->comp); 416 init_completion(&info->comp);
408 417
@@ -421,6 +430,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
421 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 430 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
422 return 0; 431 return 0;
423 432
433out_copy_unmap:
434 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
424out_copy: 435out_copy:
425 if (info->nand.options & NAND_BUSWIDTH_16) 436 if (info->nand.options & NAND_BUSWIDTH_16)
426 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) 437 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -879,7 +890,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
879 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 890 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
880 mtd); 891 mtd);
881 unsigned long timeo = jiffies; 892 unsigned long timeo = jiffies;
882 int status = NAND_STATUS_FAIL, state = this->state; 893 int status, state = this->state;
883 894
884 if (state == FL_ERASING) 895 if (state == FL_ERASING)
885 timeo += (HZ * 400) / 1000; 896 timeo += (HZ * 400) / 1000;
@@ -894,6 +905,8 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
894 break; 905 break;
895 cond_resched(); 906 cond_resched();
896 } 907 }
908
909 status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
897 return status; 910 return status;
898} 911}
899 912
@@ -925,6 +938,226 @@ static int omap_dev_ready(struct mtd_info *mtd)
925 return 1; 938 return 1;
926} 939}
927 940
941#ifdef CONFIG_MTD_NAND_OMAP_BCH
942
943/**
944 * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
945 * @mtd: MTD device structure
946 * @mode: Read/Write mode
947 */
948static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
949{
950 int nerrors;
951 unsigned int dev_width;
952 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
953 mtd);
954 struct nand_chip *chip = mtd->priv;
955
956 nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
957 dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
958 /*
959 * Program GPMC to perform correction on one 512-byte sector at a time.
960 * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
961 * gives a slight (5%) performance gain (but requires additional code).
962 */
963 (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
964}
965
966/**
967 * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
968 * @mtd: MTD device structure
969 * @dat: The pointer to data on which ecc is computed
970 * @ecc_code: The ecc_code buffer
971 */
972static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
973 u_char *ecc_code)
974{
975 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
976 mtd);
977 return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
978}
979
980/**
981 * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
982 * @mtd: MTD device structure
983 * @dat: The pointer to data on which ecc is computed
984 * @ecc_code: The ecc_code buffer
985 */
986static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
987 u_char *ecc_code)
988{
989 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
990 mtd);
991 return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
992}
993
994/**
995 * omap3_correct_data_bch - Decode received data and correct errors
996 * @mtd: MTD device structure
997 * @data: page data
998 * @read_ecc: ecc read from nand flash
999 * @calc_ecc: ecc read from HW ECC registers
1000 */
1001static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
1002 u_char *read_ecc, u_char *calc_ecc)
1003{
1004 int i, count;
1005 /* cannot correct more than 8 errors */
1006 unsigned int errloc[8];
1007 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1008 mtd);
1009
1010 count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
1011 errloc);
1012 if (count > 0) {
1013 /* correct errors */
1014 for (i = 0; i < count; i++) {
1015 /* correct data only, not ecc bytes */
1016 if (errloc[i] < 8*512)
1017 data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
1018 pr_debug("corrected bitflip %u\n", errloc[i]);
1019 }
1020 } else if (count < 0) {
1021 pr_err("ecc unrecoverable error\n");
1022 }
1023 return count;
1024}
1025
1026/**
1027 * omap3_free_bch - Release BCH ecc resources
1028 * @mtd: MTD device structure
1029 */
1030static void omap3_free_bch(struct mtd_info *mtd)
1031{
1032 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1033 mtd);
1034 if (info->bch) {
1035 free_bch(info->bch);
1036 info->bch = NULL;
1037 }
1038}
1039
1040/**
1041 * omap3_init_bch - Initialize BCH ECC
1042 * @mtd: MTD device structure
1043 * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
1044 */
1045static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1046{
1047 int ret, max_errors;
1048 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1049 mtd);
1050#ifdef CONFIG_MTD_NAND_OMAP_BCH8
1051 const int hw_errors = 8;
1052#else
1053 const int hw_errors = 4;
1054#endif
1055 info->bch = NULL;
1056
1057 max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
1058 if (max_errors != hw_errors) {
1059 pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
1060 max_errors, hw_errors);
1061 goto fail;
1062 }
1063
1064 /* initialize GPMC BCH engine */
1065 ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
1066 if (ret)
1067 goto fail;
1068
1069 /* software bch library is only used to detect and locate errors */
1070 info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
1071 if (!info->bch)
1072 goto fail;
1073
1074 info->nand.ecc.size = 512;
1075 info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
1076 info->nand.ecc.correct = omap3_correct_data_bch;
1077 info->nand.ecc.mode = NAND_ECC_HW;
1078
1079 /*
1080 * The number of corrected errors in an ecc block that will trigger
1081 * block scrubbing defaults to the ecc strength (4 or 8).
1082 * Set mtd->bitflip_threshold here to define a custom threshold.
1083 */
1084
1085 if (max_errors == 8) {
1086 info->nand.ecc.strength = 8;
1087 info->nand.ecc.bytes = 13;
1088 info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
1089 } else {
1090 info->nand.ecc.strength = 4;
1091 info->nand.ecc.bytes = 7;
1092 info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
1093 }
1094
1095 pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
1096 return 0;
1097fail:
1098 omap3_free_bch(mtd);
1099 return -1;
1100}
1101
1102/**
1103 * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
1104 * @mtd: MTD device structure
1105 */
1106static int omap3_init_bch_tail(struct mtd_info *mtd)
1107{
1108 int i, steps;
1109 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1110 mtd);
1111 struct nand_ecclayout *layout = &info->ecclayout;
1112
1113 /* build oob layout */
1114 steps = mtd->writesize/info->nand.ecc.size;
1115 layout->eccbytes = steps*info->nand.ecc.bytes;
1116
1117 /* do not bother creating special oob layouts for small page devices */
1118 if (mtd->oobsize < 64) {
1119 pr_err("BCH ecc is not supported on small page devices\n");
1120 goto fail;
1121 }
1122
1123 /* reserve 2 bytes for bad block marker */
1124 if (layout->eccbytes+2 > mtd->oobsize) {
1125 pr_err("no oob layout available for oobsize %d eccbytes %u\n",
1126 mtd->oobsize, layout->eccbytes);
1127 goto fail;
1128 }
1129
1130 /* put ecc bytes at oob tail */
1131 for (i = 0; i < layout->eccbytes; i++)
1132 layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
1133
1134 layout->oobfree[0].offset = 2;
1135 layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
1136 info->nand.ecc.layout = layout;
1137
1138 if (!(info->nand.options & NAND_BUSWIDTH_16))
1139 info->nand.badblock_pattern = &bb_descrip_flashbased;
1140 return 0;
1141fail:
1142 omap3_free_bch(mtd);
1143 return -1;
1144}
1145
1146#else
1147static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1148{
1149 pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
1150 return -1;
1151}
1152static int omap3_init_bch_tail(struct mtd_info *mtd)
1153{
1154 return -1;
1155}
1156static void omap3_free_bch(struct mtd_info *mtd)
1157{
1158}
1159#endif /* CONFIG_MTD_NAND_OMAP_BCH */
1160
928static int __devinit omap_nand_probe(struct platform_device *pdev) 1161static int __devinit omap_nand_probe(struct platform_device *pdev)
929{ 1162{
930 struct omap_nand_info *info; 1163 struct omap_nand_info *info;
@@ -1063,6 +1296,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1063 info->nand.ecc.hwctl = omap_enable_hwecc; 1296 info->nand.ecc.hwctl = omap_enable_hwecc;
1064 info->nand.ecc.correct = omap_correct_data; 1297 info->nand.ecc.correct = omap_correct_data;
1065 info->nand.ecc.mode = NAND_ECC_HW; 1298 info->nand.ecc.mode = NAND_ECC_HW;
1299 } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
1300 (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
1301 err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
1302 if (err) {
1303 err = -EINVAL;
1304 goto out_release_mem_region;
1305 }
1066 } 1306 }
1067 1307
1068 /* DIP switches on some boards change between 8 and 16 bit 1308 /* DIP switches on some boards change between 8 and 16 bit
@@ -1094,6 +1334,14 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1094 (offset + omap_oobinfo.eccbytes); 1334 (offset + omap_oobinfo.eccbytes);
1095 1335
1096 info->nand.ecc.layout = &omap_oobinfo; 1336 info->nand.ecc.layout = &omap_oobinfo;
1337 } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
1338 (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
1339 /* build OOB layout for BCH ECC correction */
1340 err = omap3_init_bch_tail(&info->mtd);
1341 if (err) {
1342 err = -EINVAL;
1343 goto out_release_mem_region;
1344 }
1097 } 1345 }
1098 1346
1099 /* second phase scan */ 1347 /* second phase scan */
@@ -1122,6 +1370,7 @@ static int omap_nand_remove(struct platform_device *pdev)
1122 struct mtd_info *mtd = platform_get_drvdata(pdev); 1370 struct mtd_info *mtd = platform_get_drvdata(pdev);
1123 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1371 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1124 mtd); 1372 mtd);
1373 omap3_free_bch(&info->mtd);
1125 1374
1126 platform_set_drvdata(pdev, NULL); 1375 platform_set_drvdata(pdev, NULL);
1127 if (info->dma_ch != -1) 1376 if (info->dma_ch != -1)
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 974dbf8251c9..1440e51cedcc 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,6 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
155 chip->ecc.mode = NAND_ECC_SOFT; 155 chip->ecc.mode = NAND_ECC_SOFT;
156 156
157 /* Enable the following for a flash based bad block table */ 157 /* Enable the following for a flash based bad block table */
158 chip->options = NAND_NO_AUTOINCR;
159 chip->bbt_options = NAND_BBT_USE_FLASH; 158 chip->bbt_options = NAND_BBT_USE_FLASH;
160 159
161 /* Scan to find existence of the device */ 160 /* Scan to find existence of the device */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 6404e6e81b10..1bcb52040422 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -23,14 +23,18 @@ struct plat_nand_data {
23 void __iomem *io_base; 23 void __iomem *io_base;
24}; 24};
25 25
26static const char *part_probe_types[] = { "cmdlinepart", NULL };
27
26/* 28/*
27 * Probe for the NAND device. 29 * Probe for the NAND device.
28 */ 30 */
29static int __devinit plat_nand_probe(struct platform_device *pdev) 31static int __devinit plat_nand_probe(struct platform_device *pdev)
30{ 32{
31 struct platform_nand_data *pdata = pdev->dev.platform_data; 33 struct platform_nand_data *pdata = pdev->dev.platform_data;
34 struct mtd_part_parser_data ppdata;
32 struct plat_nand_data *data; 35 struct plat_nand_data *data;
33 struct resource *res; 36 struct resource *res;
37 const char **part_types;
34 int err = 0; 38 int err = 0;
35 39
36 if (pdata->chip.nr_chips < 1) { 40 if (pdata->chip.nr_chips < 1) {
@@ -75,6 +79,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
75 data->chip.select_chip = pdata->ctrl.select_chip; 79 data->chip.select_chip = pdata->ctrl.select_chip;
76 data->chip.write_buf = pdata->ctrl.write_buf; 80 data->chip.write_buf = pdata->ctrl.write_buf;
77 data->chip.read_buf = pdata->ctrl.read_buf; 81 data->chip.read_buf = pdata->ctrl.read_buf;
82 data->chip.read_byte = pdata->ctrl.read_byte;
78 data->chip.chip_delay = pdata->chip.chip_delay; 83 data->chip.chip_delay = pdata->chip.chip_delay;
79 data->chip.options |= pdata->chip.options; 84 data->chip.options |= pdata->chip.options;
80 data->chip.bbt_options |= pdata->chip.bbt_options; 85 data->chip.bbt_options |= pdata->chip.bbt_options;
@@ -98,8 +103,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
98 goto out; 103 goto out;
99 } 104 }
100 105
101 err = mtd_device_parse_register(&data->mtd, 106 part_types = pdata->chip.part_probe_types ? : part_probe_types;
102 pdata->chip.part_probe_types, NULL, 107
108 ppdata.of_node = pdev->dev.of_node;
109 err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
103 pdata->chip.partitions, 110 pdata->chip.partitions,
104 pdata->chip.nr_partitions); 111 pdata->chip.nr_partitions);
105 112
@@ -140,12 +147,19 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
140 return 0; 147 return 0;
141} 148}
142 149
150static const struct of_device_id plat_nand_match[] = {
151 { .compatible = "gen_nand" },
152 {},
153};
154MODULE_DEVICE_TABLE(of, plat_nand_match);
155
143static struct platform_driver plat_nand_driver = { 156static struct platform_driver plat_nand_driver = {
144 .probe = plat_nand_probe, 157 .probe = plat_nand_probe,
145 .remove = __devexit_p(plat_nand_remove), 158 .remove = __devexit_p(plat_nand_remove),
146 .driver = { 159 .driver = {
147 .name = "gen_nand", 160 .name = "gen_nand",
148 .owner = THIS_MODULE, 161 .owner = THIS_MODULE,
162 .of_match_table = plat_nand_match,
149 }, 163 },
150}; 164};
151 165
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index def50caa6f84..252aaefcacfa 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -682,14 +682,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
682} 682}
683 683
684static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, 684static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
685 struct nand_chip *chip, const uint8_t *buf) 685 struct nand_chip *chip, const uint8_t *buf, int oob_required)
686{ 686{
687 chip->write_buf(mtd, buf, mtd->writesize); 687 chip->write_buf(mtd, buf, mtd->writesize);
688 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 688 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
689} 689}
690 690
691static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 691static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
692 struct nand_chip *chip, uint8_t *buf, int page) 692 struct nand_chip *chip, uint8_t *buf, int oob_required,
693 int page)
693{ 694{
694 struct pxa3xx_nand_host *host = mtd->priv; 695 struct pxa3xx_nand_host *host = mtd->priv;
695 struct pxa3xx_nand_info *info = host->info_data; 696 struct pxa3xx_nand_info *info = host->info_data;
@@ -1004,7 +1005,6 @@ KEEP_CONFIG:
1004 chip->ecc.size = host->page_size; 1005 chip->ecc.size = host->page_size;
1005 chip->ecc.strength = 1; 1006 chip->ecc.strength = 1;
1006 1007
1007 chip->options = NAND_NO_AUTOINCR;
1008 chip->options |= NAND_NO_READRDY; 1008 chip->options |= NAND_NO_READRDY;
1009 if (host->reg_ndcr & NDCR_DWIDTH_M) 1009 if (host->reg_ndcr & NDCR_DWIDTH_M)
1010 chip->options |= NAND_BUSWIDTH_16; 1010 chip->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index c2040187c813..8cb627751c9c 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -539,14 +539,11 @@ exit:
539 * nand_read_oob_syndrome assumes we can send column address - we can't 539 * nand_read_oob_syndrome assumes we can send column address - we can't
540 */ 540 */
541static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 541static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
542 int page, int sndcmd) 542 int page)
543{ 543{
544 if (sndcmd) { 544 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
545 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
546 sndcmd = 0;
547 }
548 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 545 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
549 return sndcmd; 546 return 0;
550} 547}
551 548
552/* 549/*
@@ -1104,18 +1101,7 @@ static struct pci_driver r852_pci_driver = {
1104 .driver.pm = &r852_pm_ops, 1101 .driver.pm = &r852_pm_ops,
1105}; 1102};
1106 1103
1107static __init int r852_module_init(void) 1104module_pci_driver(r852_pci_driver);
1108{
1109 return pci_register_driver(&r852_pci_driver);
1110}
1111
1112static void __exit r852_module_exit(void)
1113{
1114 pci_unregister_driver(&r852_pci_driver);
1115}
1116
1117module_init(r852_module_init);
1118module_exit(r852_module_exit);
1119 1105
1120MODULE_LICENSE("GPL"); 1106MODULE_LICENSE("GPL");
1121MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); 1107MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e9b2b260de3a..aa9b8a5e0b8f 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -344,7 +344,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
344} 344}
345 345
346static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 346static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
347 uint8_t *buf, int page) 347 uint8_t *buf, int oob_required, int page)
348{ 348{
349 int i, eccsize = chip->ecc.size; 349 int i, eccsize = chip->ecc.size;
350 int eccbytes = chip->ecc.bytes; 350 int eccbytes = chip->ecc.bytes;
@@ -359,14 +359,14 @@ static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
359 if (flctl->hwecc_cant_correct[i]) 359 if (flctl->hwecc_cant_correct[i])
360 mtd->ecc_stats.failed++; 360 mtd->ecc_stats.failed++;
361 else 361 else
362 mtd->ecc_stats.corrected += 0; 362 mtd->ecc_stats.corrected += 0; /* FIXME */
363 } 363 }
364 364
365 return 0; 365 return 0;
366} 366}
367 367
368static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 368static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
369 const uint8_t *buf) 369 const uint8_t *buf, int oob_required)
370{ 370{
371 int i, eccsize = chip->ecc.size; 371 int i, eccsize = chip->ecc.size;
372 int eccbytes = chip->ecc.bytes; 372 int eccbytes = chip->ecc.bytes;
@@ -881,8 +881,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
881 flctl->hwecc = pdata->has_hwecc; 881 flctl->hwecc = pdata->has_hwecc;
882 flctl->holden = pdata->use_holden; 882 flctl->holden = pdata->use_holden;
883 883
884 nand->options = NAND_NO_AUTOINCR;
885
886 /* Set address of hardware control function */ 884 /* Set address of hardware control function */
887 /* 20 us command delay time */ 885 /* 20 us command delay time */
888 nand->chip_delay = 20; 886 nand->chip_delay = 20;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 774c3c266713..082bcdcd6bcf 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -94,17 +94,16 @@ static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
94 {NULL,} 94 {NULL,}
95}; 95};
96 96
97#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
98static struct nand_flash_dev nand_xd_flash_ids[] = { 97static struct nand_flash_dev nand_xd_flash_ids[] = {
99 98
100 {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, 99 {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
101 {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, 100 {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
102 {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, 101 {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
103 {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, 102 {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
104 {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM}, 103 {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
105 {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM}, 104 {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
106 {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM}, 105 {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
107 {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM}, 106 {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
108 {NULL,} 107 {NULL,}
109}; 108};
110 109
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3ce12ef359e..7153e0d27101 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1201,7 +1201,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1201 if (mtd->ecc_stats.failed - stats.failed) 1201 if (mtd->ecc_stats.failed - stats.failed)
1202 return -EBADMSG; 1202 return -EBADMSG;
1203 1203
1204 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 1204 /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
1205 return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
1205} 1206}
1206 1207
1207/** 1208/**
@@ -1333,7 +1334,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1333 if (mtd->ecc_stats.failed - stats.failed) 1334 if (mtd->ecc_stats.failed - stats.failed)
1334 return -EBADMSG; 1335 return -EBADMSG;
1335 1336
1336 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 1337 /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
1338 return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
1337} 1339}
1338 1340
1339/** 1341/**
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 9f957c2d48e9..09d4f8d9d592 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir;
264 */ 264 */
265int ubi_debugfs_init(void) 265int ubi_debugfs_init(void)
266{ 266{
267 if (!IS_ENABLED(DEBUG_FS))
268 return 0;
269
267 dfs_rootdir = debugfs_create_dir("ubi", NULL); 270 dfs_rootdir = debugfs_create_dir("ubi", NULL);
268 if (IS_ERR_OR_NULL(dfs_rootdir)) { 271 if (IS_ERR_OR_NULL(dfs_rootdir)) {
269 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); 272 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -281,7 +284,8 @@ int ubi_debugfs_init(void)
281 */ 284 */
282void ubi_debugfs_exit(void) 285void ubi_debugfs_exit(void)
283{ 286{
284 debugfs_remove(dfs_rootdir); 287 if (IS_ENABLED(DEBUG_FS))
288 debugfs_remove(dfs_rootdir);
285} 289}
286 290
287/* Read an UBI debugfs file */ 291/* Read an UBI debugfs file */
@@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
403 struct dentry *dent; 407 struct dentry *dent;
404 struct ubi_debug_info *d = ubi->dbg; 408 struct ubi_debug_info *d = ubi->dbg;
405 409
410 if (!IS_ENABLED(DEBUG_FS))
411 return 0;
412
406 n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, 413 n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
407 ubi->ubi_num); 414 ubi->ubi_num);
408 if (n == UBI_DFS_DIR_LEN) { 415 if (n == UBI_DFS_DIR_LEN) {
@@ -470,5 +477,6 @@ out:
470 */ 477 */
471void ubi_debugfs_exit_dev(struct ubi_device *ubi) 478void ubi_debugfs_exit_dev(struct ubi_device *ubi)
472{ 479{
473 debugfs_remove_recursive(ubi->dbg->dfs_dir); 480 if (IS_ENABLED(DEBUG_FS))
481 debugfs_remove_recursive(ubi->dbg->dfs_dir);
474} 482}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9df100a4ec38..b6be644e7b85 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1262 dbg_wl("flush pending work for LEB %d:%d (%d pending works)", 1262 dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1263 vol_id, lnum, ubi->works_count); 1263 vol_id, lnum, ubi->works_count);
1264 1264
1265 down_write(&ubi->work_sem);
1266 while (found) { 1265 while (found) {
1267 struct ubi_work *wrk; 1266 struct ubi_work *wrk;
1268 found = 0; 1267 found = 0;
1269 1268
1269 down_read(&ubi->work_sem);
1270 spin_lock(&ubi->wl_lock); 1270 spin_lock(&ubi->wl_lock);
1271 list_for_each_entry(wrk, &ubi->works, list) { 1271 list_for_each_entry(wrk, &ubi->works, list) {
1272 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && 1272 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
@@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1277 spin_unlock(&ubi->wl_lock); 1277 spin_unlock(&ubi->wl_lock);
1278 1278
1279 err = wrk->func(ubi, wrk, 0); 1279 err = wrk->func(ubi, wrk, 0);
1280 if (err) 1280 if (err) {
1281 goto out; 1281 up_read(&ubi->work_sem);
1282 return err;
1283 }
1284
1282 spin_lock(&ubi->wl_lock); 1285 spin_lock(&ubi->wl_lock);
1283 found = 1; 1286 found = 1;
1284 break; 1287 break;
1285 } 1288 }
1286 } 1289 }
1287 spin_unlock(&ubi->wl_lock); 1290 spin_unlock(&ubi->wl_lock);
1291 up_read(&ubi->work_sem);
1288 } 1292 }
1289 1293
1290out: 1294 /*
1295 * Make sure all the works which have been done in parallel are
1296 * finished.
1297 */
1298 down_write(&ubi->work_sem);
1291 up_write(&ubi->work_sem); 1299 up_write(&ubi->work_sem);
1300
1292 return err; 1301 return err;
1293} 1302}
1294 1303
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee8cf9e8a3b..b9c2ae62166d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
76#include <net/route.h> 76#include <net/route.h>
77#include <net/net_namespace.h> 77#include <net/net_namespace.h>
78#include <net/netns/generic.h> 78#include <net/netns/generic.h>
79#include <net/pkt_sched.h>
79#include "bonding.h" 80#include "bonding.h"
80#include "bond_3ad.h" 81#include "bond_3ad.h"
81#include "bond_alb.h" 82#include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
381 return next; 382 return next;
382} 383}
383 384
384#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
385
386/** 385/**
387 * bond_dev_queue_xmit - Prepare skb for xmit. 386 * bond_dev_queue_xmit - Prepare skb for xmit.
388 * 387 *
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
395{ 394{
396 skb->dev = slave_dev; 395 skb->dev = slave_dev;
397 396
398 skb->queue_mapping = bond_queue_mapping(skb); 397 BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
398 sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
399 skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
399 400
400 if (unlikely(netpoll_tx_running(slave_dev))) 401 if (unlikely(netpoll_tx_running(slave_dev)))
401 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); 402 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -4171,7 +4172,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4171 /* 4172 /*
4172 * Save the original txq to restore before passing to the driver 4173 * Save the original txq to restore before passing to the driver
4173 */ 4174 */
4174 bond_queue_mapping(skb) = skb->queue_mapping; 4175 qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
4175 4176
4176 if (unlikely(txq >= dev->real_num_tx_queues)) { 4177 if (unlikely(txq >= dev->real_num_tx_queues)) {
4177 do { 4178 do {
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f045320..485bedb8278c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
1082 } 1082 }
1083 } 1083 }
1084 1084
1085 pr_info("%s: Unable to set %.*s as primary slave.\n", 1085 strncpy(bond->params.primary, ifname, IFNAMSIZ);
1086 bond->dev->name, (int)strlen(buf) - 1, buf); 1086 bond->params.primary[IFNAMSIZ - 1] = 0;
1087
1088 pr_info("%s: Recording %s as primary, "
1089 "but it has not been enslaved to %s yet.\n",
1090 bond->dev->name, ifname, bond->dev->name);
1087out: 1091out:
1088 write_unlock_bh(&bond->curr_slave_lock); 1092 write_unlock_bh(&bond->curr_slave_lock);
1089 read_unlock(&bond->lock); 1093 read_unlock(&bond->lock);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 536bda072a16..8dc84d66eea1 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
686 * 686 *
687 * We iterate from priv->tx_echo to priv->tx_next and check if the 687 * We iterate from priv->tx_echo to priv->tx_next and check if the
688 * packet has been transmitted, echo it back to the CAN framework. 688 * packet has been transmitted, echo it back to the CAN framework.
689 * If we discover a not yet transmitted package, stop looking for more. 689 * If we discover a not yet transmitted packet, stop looking for more.
690 */ 690 */
691static void c_can_do_tx(struct net_device *dev) 691static void c_can_do_tx(struct net_device *dev)
692{ 692{
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
698 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 698 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
699 msg_obj_no = get_tx_echo_msg_obj(priv); 699 msg_obj_no = get_tx_echo_msg_obj(priv);
700 val = c_can_read_reg32(priv, &priv->regs->txrqst1); 700 val = c_can_read_reg32(priv, &priv->regs->txrqst1);
701 if (!(val & (1 << msg_obj_no))) { 701 if (!(val & (1 << (msg_obj_no - 1)))) {
702 can_get_echo_skb(dev, 702 can_get_echo_skb(dev,
703 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 703 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
704 stats->tx_bytes += priv->read_reg(priv, 704 stats->tx_bytes += priv->read_reg(priv,
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
706 & IF_MCONT_DLC_MASK; 706 & IF_MCONT_DLC_MASK;
707 stats->tx_packets++; 707 stats->tx_packets++;
708 c_can_inval_msg_object(dev, 0, msg_obj_no); 708 c_can_inval_msg_object(dev, 0, msg_obj_no);
709 } else {
710 break;
709 } 711 }
710 } 712 }
711 713
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
950 struct net_device *dev = napi->dev; 952 struct net_device *dev = napi->dev;
951 struct c_can_priv *priv = netdev_priv(dev); 953 struct c_can_priv *priv = netdev_priv(dev);
952 954
953 irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 955 irqstatus = priv->irqstatus;
954 if (!irqstatus) 956 if (!irqstatus)
955 goto end; 957 goto end;
956 958
@@ -1028,12 +1030,11 @@ end:
1028 1030
1029static irqreturn_t c_can_isr(int irq, void *dev_id) 1031static irqreturn_t c_can_isr(int irq, void *dev_id)
1030{ 1032{
1031 u16 irqstatus;
1032 struct net_device *dev = (struct net_device *)dev_id; 1033 struct net_device *dev = (struct net_device *)dev_id;
1033 struct c_can_priv *priv = netdev_priv(dev); 1034 struct c_can_priv *priv = netdev_priv(dev);
1034 1035
1035 irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 1036 priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
1036 if (!irqstatus) 1037 if (!priv->irqstatus)
1037 return IRQ_NONE; 1038 return IRQ_NONE;
1038 1039
1039 /* disable all interrupts and schedule the NAPI */ 1040 /* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
1063 goto exit_irq_fail; 1064 goto exit_irq_fail;
1064 } 1065 }
1065 1066
1067 napi_enable(&priv->napi);
1068
1066 /* start the c_can controller */ 1069 /* start the c_can controller */
1067 c_can_start(dev); 1070 c_can_start(dev);
1068 1071
1069 napi_enable(&priv->napi);
1070 netif_start_queue(dev); 1072 netif_start_queue(dev);
1071 1073
1072 return 0; 1074 return 0;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b7fbef3d09a..5f32d34af507 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -76,6 +76,7 @@ struct c_can_priv {
76 unsigned int tx_next; 76 unsigned int tx_next;
77 unsigned int tx_echo; 77 unsigned int tx_echo;
78 void *priv; /* for board-specific data */ 78 void *priv; /* for board-specific data */
79 u16 irqstatus;
79}; 80};
80 81
81struct net_device *alloc_c_can_dev(void); 82struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 53115eee8075..688371cda37a 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
154 struct cc770_platform_data *pdata = pdev->dev.platform_data; 154 struct cc770_platform_data *pdata = pdev->dev.platform_data;
155 155
156 priv->can.clock.freq = pdata->osc_freq; 156 priv->can.clock.freq = pdata->osc_freq;
157 if (priv->cpu_interface | CPUIF_DSC) 157 if (priv->cpu_interface & CPUIF_DSC)
158 priv->can.clock.freq /= 2; 158 priv->can.clock.freq /= 2;
159 priv->clkout = pdata->cor; 159 priv->clkout = pdata->cor;
160 priv->bus_config = pdata->bcr; 160 priv->bus_config = pdata->bcr;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a2747b..bab0158f1cc3 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
187 rtnl_lock(); 187 rtnl_lock();
188 err = __rtnl_link_register(&dummy_link_ops); 188 err = __rtnl_link_register(&dummy_link_ops);
189 189
190 for (i = 0; i < numdummies && !err; i++) 190 for (i = 0; i < numdummies && !err; i++) {
191 err = dummy_init_one(); 191 err = dummy_init_one();
192 cond_resched();
193 }
192 if (err < 0) 194 if (err < 0)
193 __rtnl_link_unregister(&dummy_link_ops); 195 __rtnl_link_unregister(&dummy_link_ops);
194 rtnl_unlock(); 196 rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2f354c..7de824184979 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -747,21 +747,6 @@ struct bnx2x_fastpath {
747 747
748#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG 748#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
749 749
750#define BNX2X_IP_CSUM_ERR(cqe) \
751 (!((cqe)->fast_path_cqe.status_flags & \
752 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
753 ((cqe)->fast_path_cqe.type_error_flags & \
754 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
755
756#define BNX2X_L4_CSUM_ERR(cqe) \
757 (!((cqe)->fast_path_cqe.status_flags & \
758 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
759 ((cqe)->fast_path_cqe.type_error_flags & \
760 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
761
762#define BNX2X_RX_CSUM_OK(cqe) \
763 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
764
765#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ 750#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
766 (((le16_to_cpu(flags) & \ 751 (((le16_to_cpu(flags) & \
767 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ 752 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743bf4bde..cbc56f274e0c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
617 return 0; 617 return 0;
618} 618}
619 619
620static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
621 struct bnx2x_fastpath *fp)
622{
623 /* Do nothing if no IP/L4 csum validation was done */
624
625 if (cqe->fast_path_cqe.status_flags &
626 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
627 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
628 return;
629
630 /* If both IP/L4 validation were done, check if an error was found. */
631
632 if (cqe->fast_path_cqe.type_error_flags &
633 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
634 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
635 fp->eth_q_stats.hw_csum_err++;
636 else
637 skb->ip_summed = CHECKSUM_UNNECESSARY;
638}
620 639
621int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) 640int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
622{ 641{
@@ -806,13 +825,9 @@ reuse_rx:
806 825
807 skb_checksum_none_assert(skb); 826 skb_checksum_none_assert(skb);
808 827
809 if (bp->dev->features & NETIF_F_RXCSUM) { 828 if (bp->dev->features & NETIF_F_RXCSUM)
829 bnx2x_csum_validate(skb, cqe, fp);
810 830
811 if (likely(BNX2X_RX_CSUM_OK(cqe)))
812 skb->ip_summed = CHECKSUM_UNNECESSARY;
813 else
814 fp->eth_q_stats.hw_csum_err++;
815 }
816 831
817 skb_record_rx_queue(skb, fp->rx_queue); 832 skb_record_rx_queue(skb, fp->rx_queue);
818 833
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb516807a..e47ff8be1d7b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
14275 } 14275 }
14276 } 14276 }
14277 14277
14278 if (tg3_flag(tp, 5755_PLUS)) 14278 if (tg3_flag(tp, 5755_PLUS) ||
14279 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14279 tg3_flag_set(tp, SHORT_DMA_BUG); 14280 tg3_flag_set(tp, SHORT_DMA_BUG);
14280 14281
14281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 14282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd308d78a..fdb50cec6b51 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
736 736
737 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); 737 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
738 if (copied) { 738 if (copied) {
739 int gso_segs = skb_shinfo(skb)->gso_segs;
740
739 /* record the sent skb in the sent_skb table */ 741 /* record the sent skb in the sent_skb table */
740 BUG_ON(txo->sent_skb_list[start]); 742 BUG_ON(txo->sent_skb_list[start]);
741 txo->sent_skb_list[start] = skb; 743 txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
753 755
754 be_txq_notify(adapter, txq->id, wrb_cnt); 756 be_txq_notify(adapter, txq->id, wrb_cnt);
755 757
756 be_tx_stats_update(txo, wrb_cnt, copied, 758 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
757 skb_shinfo(skb)->gso_segs, stopped);
758 } else { 759 } else {
759 txq->head = start; 760 txq->head = start;
760 dev_kfree_skb_any(skb); 761 dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 97f947b3d94a..2933d08b036e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
437 length = status & BCOM_FEC_RX_BD_LEN_MASK; 437 length = status & BCOM_FEC_RX_BD_LEN_MASK;
438 skb_put(rskb, length - 4); /* length without CRC32 */ 438 skb_put(rskb, length - 4); /* length without CRC32 */
439 rskb->protocol = eth_type_trans(rskb, dev); 439 rskb->protocol = eth_type_trans(rskb, dev);
440 if (!skb_defer_rx_timestamp(skb)) 440 if (!skb_defer_rx_timestamp(rskb))
441 netif_rx(rskb); 441 netif_rx(rskb);
442 442
443 spin_lock(&priv->lock); 443 spin_lock(&priv->lock);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 95731c841044..7483ca0a6282 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4080 spin_lock_irqsave(&adapter->stats_lock, 4080 spin_lock_irqsave(&adapter->stats_lock,
4081 irq_flags); 4081 irq_flags);
4082 e1000_tbi_adjust_stats(hw, &adapter->stats, 4082 e1000_tbi_adjust_stats(hw, &adapter->stats,
4083 length, skb->data); 4083 length, mapped);
4084 spin_unlock_irqrestore(&adapter->stats_lock, 4084 spin_unlock_irqrestore(&adapter->stats_lock,
4085 irq_flags); 4085 irq_flags);
4086 length--; 4086 length--;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075df7a4..905e2147d918 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
258 * When SoL/IDER sessions are active, autoneg/speed/duplex 258 * When SoL/IDER sessions are active, autoneg/speed/duplex
259 * cannot be changed 259 * cannot be changed
260 */ 260 */
261 if (hw->phy.ops.check_reset_block(hw)) { 261 if (hw->phy.ops.check_reset_block &&
262 hw->phy.ops.check_reset_block(hw)) {
262 e_err("Cannot change link characteristics when SoL/IDER is active.\n"); 263 e_err("Cannot change link characteristics when SoL/IDER is active.\n");
263 return -EINVAL; 264 return -EINVAL;
264 } 265 }
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1615 * PHY loopback cannot be performed if SoL/IDER 1616 * PHY loopback cannot be performed if SoL/IDER
1616 * sessions are active 1617 * sessions are active
1617 */ 1618 */
1618 if (hw->phy.ops.check_reset_block(hw)) { 1619 if (hw->phy.ops.check_reset_block &&
1620 hw->phy.ops.check_reset_block(hw)) {
1619 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); 1621 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
1620 *data = 0; 1622 *data = 0;
1621 goto out; 1623 goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index bbf70ba367da..238ab2f8a5e7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -165,14 +165,14 @@
165#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ 165#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
166 166
167/* Intel Rapid Start Technology Support */ 167/* Intel Rapid Start Technology Support */
168#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70) 168#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
169#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 169#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
170#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) 170#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
171#define I217_SxCTRL_MASK 0x1000 171#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
172#define I217_CGFREG PHY_REG(772, 29) 172#define I217_CGFREG PHY_REG(772, 29)
173#define I217_CGFREG_MASK 0x0002 173#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
174#define I217_MEMPWR PHY_REG(772, 26) 174#define I217_MEMPWR PHY_REG(772, 26)
175#define I217_MEMPWR_MASK 0x0010 175#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
176 176
177/* Strapping Option Register - RO */ 177/* Strapping Option Register - RO */
178#define E1000_STRAP 0x0000C 178#define E1000_STRAP 0x0000C
@@ -4089,12 +4089,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4089 * power good. 4089 * power good.
4090 */ 4090 */
4091 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); 4091 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
4092 phy_reg |= I217_SxCTRL_MASK; 4092 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4093 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); 4093 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
4094 4094
4095 /* Disable the SMB release on LCD reset. */ 4095 /* Disable the SMB release on LCD reset. */
4096 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4096 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4097 phy_reg &= ~I217_MEMPWR; 4097 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4098 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4098 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4099 } 4099 }
4100 4100
@@ -4103,7 +4103,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4103 * Support 4103 * Support
4104 */ 4104 */
4105 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4105 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4106 phy_reg |= I217_CGFREG_MASK; 4106 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4107 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 4107 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4108 4108
4109release: 4109release:
@@ -4176,7 +4176,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4176 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4176 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4177 if (ret_val) 4177 if (ret_val)
4178 goto release; 4178 goto release;
4179 phy_reg |= I217_MEMPWR_MASK; 4179 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4180 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4180 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4181 4181
4182 /* Disable Proxy */ 4182 /* Disable Proxy */
@@ -4186,7 +4186,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4186 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4186 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4187 if (ret_val) 4187 if (ret_val)
4188 goto release; 4188 goto release;
4189 phy_reg &= ~I217_CGFREG_MASK; 4189 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4190 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 4190 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4191release: 4191release:
4192 if (ret_val) 4192 if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3ab52e..a13439928488 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
709 * In the case of the phy reset being blocked, we already have a link. 709 * In the case of the phy reset being blocked, we already have a link.
710 * We do not need to set it up again. 710 * We do not need to set it up again.
711 */ 711 */
712 if (hw->phy.ops.check_reset_block(hw)) 712 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
713 return 0; 713 return 0;
714 714
715 /* 715 /*
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435b00dc..31d37a2b5ba8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6237,7 +6237,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6237 adapter->hw.phy.ms_type = e1000_ms_hw_default; 6237 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6238 } 6238 }
6239 6239
6240 if (hw->phy.ops.check_reset_block(hw)) 6240 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6241 e_info("PHY reset is blocked due to SOL/IDER session.\n"); 6241 e_info("PHY reset is blocked due to SOL/IDER session.\n");
6242 6242
6243 /* Set initial default active device features */ 6243 /* Set initial default active device features */
@@ -6404,7 +6404,7 @@ err_register:
6404 if (!(adapter->flags & FLAG_HAS_AMT)) 6404 if (!(adapter->flags & FLAG_HAS_AMT))
6405 e1000e_release_hw_control(adapter); 6405 e1000e_release_hw_control(adapter);
6406err_eeprom: 6406err_eeprom:
6407 if (!hw->phy.ops.check_reset_block(hw)) 6407 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6408 e1000_phy_hw_reset(&adapter->hw); 6408 e1000_phy_hw_reset(&adapter->hw);
6409err_hw_init: 6409err_hw_init:
6410 kfree(adapter->tx_ring); 6410 kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d013bc3c..b860d4f7ea2a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
2155 s32 ret_val; 2155 s32 ret_val;
2156 u32 ctrl; 2156 u32 ctrl;
2157 2157
2158 ret_val = phy->ops.check_reset_block(hw); 2158 if (phy->ops.check_reset_block) {
2159 if (ret_val) 2159 ret_val = phy->ops.check_reset_block(hw);
2160 return 0; 2160 if (ret_val)
2161 return 0;
2162 }
2161 2163
2162 ret_val = phy->ops.acquire(hw); 2164 ret_val = phy->ops.acquire(hw);
2163 if (ret_val) 2165 if (ret_val)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457ea23a..17ad6a3c1be1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1390 union ixgbe_adv_rx_desc *rx_desc, 1390 union ixgbe_adv_rx_desc *rx_desc,
1391 struct sk_buff *skb) 1391 struct sk_buff *skb)
1392{ 1392{
1393 struct net_device *dev = rx_ring->netdev;
1394
1393 ixgbe_update_rsc_stats(rx_ring, skb); 1395 ixgbe_update_rsc_stats(rx_ring, skb);
1394 1396
1395 ixgbe_rx_hash(rx_ring, rx_desc, skb); 1397 ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1401 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); 1403 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
1402#endif 1404#endif
1403 1405
1404 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1406 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
1407 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1405 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1408 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1406 __vlan_hwaccel_put_tag(skb, vid); 1409 __vlan_hwaccel_put_tag(skb, vid);
1407 } 1410 }
1408 1411
1409 skb_record_rx_queue(skb, rx_ring->queue_index); 1412 skb_record_rx_queue(skb, rx_ring->queue_index);
1410 1413
1411 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1414 skb->protocol = eth_type_trans(skb, dev);
1412} 1415}
1413 1416
1414static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, 1417static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -3607,10 +3610,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3607 if (hw->mac.type == ixgbe_mac_82598EB) 3610 if (hw->mac.type == ixgbe_mac_82598EB)
3608 netif_set_gso_max_size(adapter->netdev, 32768); 3611 netif_set_gso_max_size(adapter->netdev, 32768);
3609 3612
3610
3611 /* Enable VLAN tag insert/strip */
3612 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3613
3614 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3613 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3615 3614
3616#ifdef IXGBE_FCOE 3615#ifdef IXGBE_FCOE
@@ -6701,11 +6700,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6701{ 6700{
6702 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6701 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6703 6702
6704#ifdef CONFIG_DCB
6705 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
6706 features &= ~NETIF_F_HW_VLAN_RX;
6707#endif
6708
6709 /* return error if RXHASH is being enabled when RSS is not supported */ 6703 /* return error if RXHASH is being enabled when RSS is not supported */
6710 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) 6704 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
6711 features &= ~NETIF_F_RXHASH; 6705 features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6712,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6718 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 6712 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
6719 features &= ~NETIF_F_LRO; 6713 features &= ~NETIF_F_LRO;
6720 6714
6721
6722 return features; 6715 return features;
6723} 6716}
6724 6717
@@ -6766,6 +6759,11 @@ static int ixgbe_set_features(struct net_device *netdev,
6766 need_reset = true; 6759 need_reset = true;
6767 } 6760 }
6768 6761
6762 if (features & NETIF_F_HW_VLAN_RX)
6763 ixgbe_vlan_strip_enable(adapter);
6764 else
6765 ixgbe_vlan_strip_disable(adapter);
6766
6769 if (changed & NETIF_F_RXALL) 6767 if (changed & NETIF_F_RXALL)
6770 need_reset = true; 6768 need_reset = true;
6771 6769
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 04d901d0ff63..f0f06b2bc28b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
436 /* 436 /*
437 * Hardware-specific parameters. 437 * Hardware-specific parameters.
438 */ 438 */
439#if defined(CONFIG_HAVE_CLK)
439 struct clk *clk; 440 struct clk *clk;
441#endif
440 unsigned int t_clk; 442 unsigned int t_clk;
441}; 443};
442 444
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2895 mp->dev = dev; 2897 mp->dev = dev;
2896 2898
2897 /* 2899 /*
2898 * Get the clk rate, if there is one, otherwise use the default. 2900 * Start with a default rate, and if there is a clock, allow
2901 * it to override the default.
2899 */ 2902 */
2903 mp->t_clk = 133000000;
2904#if defined(CONFIG_HAVE_CLK)
2900 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); 2905 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
2901 if (!IS_ERR(mp->clk)) { 2906 if (!IS_ERR(mp->clk)) {
2902 clk_prepare_enable(mp->clk); 2907 clk_prepare_enable(mp->clk);
2903 mp->t_clk = clk_get_rate(mp->clk); 2908 mp->t_clk = clk_get_rate(mp->clk);
2904 } else {
2905 mp->t_clk = 133000000;
2906 printk(KERN_WARNING "Unable to get clock");
2907 } 2909 }
2908 2910#endif
2909 set_params(mp, pd); 2911 set_params(mp, pd);
2910 netif_set_real_num_tx_queues(dev, mp->txq_count); 2912 netif_set_real_num_tx_queues(dev, mp->txq_count);
2911 netif_set_real_num_rx_queues(dev, mp->rxq_count); 2913 netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2995 phy_detach(mp->phy); 2997 phy_detach(mp->phy);
2996 cancel_work_sync(&mp->tx_timeout_task); 2998 cancel_work_sync(&mp->tx_timeout_task);
2997 2999
3000#if defined(CONFIG_HAVE_CLK)
2998 if (!IS_ERR(mp->clk)) { 3001 if (!IS_ERR(mp->clk)) {
2999 clk_disable_unprepare(mp->clk); 3002 clk_disable_unprepare(mp->clk);
3000 clk_put(mp->clk); 3003 clk_put(mp->clk);
3001 } 3004 }
3005#endif
3006
3002 free_netdev(mp->dev); 3007 free_netdev(mp->dev);
3003 3008
3004 platform_set_drvdata(pdev, NULL); 3009 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f2ab92..28a54451a3e5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
4381 struct sky2_port *sky2 = netdev_priv(dev); 4381 struct sky2_port *sky2 = netdev_priv(dev);
4382 netdev_features_t changed = dev->features ^ features; 4382 netdev_features_t changed = dev->features ^ features;
4383 4383
4384 if (changed & NETIF_F_RXCSUM) { 4384 if ((changed & NETIF_F_RXCSUM) &&
4385 bool on = features & NETIF_F_RXCSUM; 4385 !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
4386 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), 4386 sky2_write32(sky2->hw,
4387 on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 4387 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
4388 (features & NETIF_F_RXCSUM)
4389 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
4388 } 4390 }
4389 4391
4390 if (changed & NETIF_F_RXHASH) 4392 if (changed & NETIF_F_RXHASH)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1bcead1fa2f6..842c8ce9494e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = {
617 .out_is_imm = false, 617 .out_is_imm = false,
618 .encode_slave_id = false, 618 .encode_slave_id = false,
619 .verify = NULL, 619 .verify = NULL,
620 .wrapper = NULL 620 .wrapper = mlx4_QUERY_FW_wrapper
621 }, 621 },
622 { 622 {
623 .opcode = MLX4_CMD_QUERY_HCA, 623 .opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = {
635 .out_is_imm = false, 635 .out_is_imm = false,
636 .encode_slave_id = false, 636 .encode_slave_id = false,
637 .verify = NULL, 637 .verify = NULL,
638 .wrapper = NULL 638 .wrapper = mlx4_QUERY_DEV_CAP_wrapper
639 }, 639 },
640 { 640 {
641 .opcode = MLX4_CMD_QUERY_FUNC_CAP, 641 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 988b2424e1c6..69ba57270481 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
136 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; 136 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
137 struct mlx4_en_priv *priv; 137 struct mlx4_en_priv *priv;
138 138
139 if (!mdev->pndev[port])
140 return;
141
142 priv = netdev_priv(mdev->pndev[port]);
143 switch (event) { 139 switch (event) {
144 case MLX4_DEV_EVENT_PORT_UP: 140 case MLX4_DEV_EVENT_PORT_UP:
145 case MLX4_DEV_EVENT_PORT_DOWN: 141 case MLX4_DEV_EVENT_PORT_DOWN:
142 if (!mdev->pndev[port])
143 return;
144 priv = netdev_priv(mdev->pndev[port]);
146 /* To prevent races, we poll the link state in a separate 145 /* To prevent races, we poll the link state in a separate
147 task rather than changing it here */ 146 task rather than changing it here */
148 priv->link_state = event; 147 priv->link_state = event;
@@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
154 break; 153 break;
155 154
156 default: 155 default:
157 mlx4_warn(mdev, "Unhandled event: %d\n", event); 156 if (port < 1 || port > dev->caps.num_ports ||
157 !mdev->pndev[port])
158 return;
159 mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
158 } 160 }
159} 161}
160 162
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3b6f8efbf141..bce98d9c0039 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
426 426
427 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); 427 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
428 428
429 if (flr_slave > dev->num_slaves) { 429 if (flr_slave >= dev->num_slaves) {
430 mlx4_warn(dev, 430 mlx4_warn(dev,
431 "Got FLR for unknown function: %d\n", 431 "Got FLR for unknown function: %d\n",
432 flr_slave); 432 flr_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 68f5cd6cb3c7..9c83bb8151ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -412,7 +412,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
412 outbox = mailbox->buf; 412 outbox = mailbox->buf;
413 413
414 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 414 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
415 MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev)); 415 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
416 if (err) 416 if (err)
417 goto out; 417 goto out;
418 418
@@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
590 590
591 for (i = 1; i <= dev_cap->num_ports; ++i) { 591 for (i = 1; i <= dev_cap->num_ports; ++i) {
592 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 592 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
593 MLX4_CMD_TIME_CLASS_B, 593 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
594 !mlx4_is_slave(dev));
595 if (err) 594 if (err)
596 goto out; 595 goto out;
597 596
@@ -669,6 +668,28 @@ out:
669 return err; 668 return err;
670} 669}
671 670
671int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
672 struct mlx4_vhcr *vhcr,
673 struct mlx4_cmd_mailbox *inbox,
674 struct mlx4_cmd_mailbox *outbox,
675 struct mlx4_cmd_info *cmd)
676{
677 int err = 0;
678 u8 field;
679
680 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
681 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
682 if (err)
683 return err;
684
685 /* For guests, report Blueflame disabled */
686 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
687 field &= 0x7f;
688 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
689
690 return 0;
691}
692
672int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, 693int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
673 struct mlx4_vhcr *vhcr, 694 struct mlx4_vhcr *vhcr,
674 struct mlx4_cmd_mailbox *inbox, 695 struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +881,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
860 ((fw_ver & 0xffff0000ull) >> 16) | 881 ((fw_ver & 0xffff0000ull) >> 16) |
861 ((fw_ver & 0x0000ffffull) << 16); 882 ((fw_ver & 0x0000ffffull) << 16);
862 883
884 if (mlx4_is_slave(dev))
885 goto out;
886
863 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 887 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
864 dev->caps.function = lg; 888 dev->caps.function = lg;
865 889
@@ -927,6 +951,27 @@ out:
927 return err; 951 return err;
928} 952}
929 953
954int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
955 struct mlx4_vhcr *vhcr,
956 struct mlx4_cmd_mailbox *inbox,
957 struct mlx4_cmd_mailbox *outbox,
958 struct mlx4_cmd_info *cmd)
959{
960 u8 *outbuf;
961 int err;
962
963 outbuf = outbox->buf;
964 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
965 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
966 if (err)
967 return err;
968
969 /* for slaves, zero out everything except FW version */
970 outbuf[0] = outbuf[1] = 0;
971 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
972 return 0;
973}
974
930static void get_board_id(void *vsd, char *board_id) 975static void get_board_id(void *vsd, char *board_id)
931{ 976{
932 int i; 977 int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2e024a68fa81..ee6f4fe00837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -142,12 +142,6 @@ struct mlx4_port_config {
142 struct pci_dev *pdev; 142 struct pci_dev *pdev;
143}; 143};
144 144
145static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
146{
147 return dev->caps.reserved_eqs +
148 MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
149}
150
151int mlx4_check_port_params(struct mlx4_dev *dev, 145int mlx4_check_port_params(struct mlx4_dev *dev,
152 enum mlx4_port_type *port_type) 146 enum mlx4_port_type *port_type)
153{ 147{
@@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
217 } 211 }
218 212
219 dev->caps.num_ports = dev_cap->num_ports; 213 dev->caps.num_ports = dev_cap->num_ports;
214 dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
220 for (i = 1; i <= dev->caps.num_ports; ++i) { 215 for (i = 1; i <= dev->caps.num_ports; ++i) {
221 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 216 dev->caps.vl_cap[i] = dev_cap->max_vl[i];
222 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; 217 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
@@ -435,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
435 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 430 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
436 431
437 memset(&dev_cap, 0, sizeof(dev_cap)); 432 memset(&dev_cap, 0, sizeof(dev_cap));
433 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
438 err = mlx4_dev_cap(dev, &dev_cap); 434 err = mlx4_dev_cap(dev, &dev_cap);
439 if (err) { 435 if (err) {
440 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 436 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
441 return err; 437 return err;
442 } 438 }
443 439
440 err = mlx4_QUERY_FW(dev);
441 if (err)
442 mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
443
444 page_size = ~dev->caps.page_size_cap + 1; 444 page_size = ~dev->caps.page_size_cap + 1;
445 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 445 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
446 if (page_size > PAGE_SIZE) { 446 if (page_size > PAGE_SIZE) {
@@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
485 dev->caps.num_mgms = 0; 485 dev->caps.num_mgms = 0;
486 dev->caps.num_amgms = 0; 486 dev->caps.num_amgms = 0;
487 487
488 for (i = 1; i <= dev->caps.num_ports; ++i)
489 dev->caps.port_mask[i] = dev->caps.port_type[i];
490
491 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 488 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
492 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 489 mlx4_err(dev, "HCA has %d ports, but we only support %d, "
493 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 490 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
494 return -ENODEV; 491 return -ENODEV;
495 } 492 }
496 493
494 for (i = 1; i <= dev->caps.num_ports; ++i)
495 dev->caps.port_mask[i] = dev->caps.port_type[i];
496
497 if (dev->caps.uar_page_size * (dev->caps.num_uars - 497 if (dev->caps.uar_page_size * (dev->caps.num_uars -
498 dev->caps.reserved_uars) > 498 dev->caps.reserved_uars) >
499 pci_resource_len(dev->pdev, 2)) { 499 pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
504 return -ENODEV; 504 return -ENODEV;
505 } 505 }
506 506
507#if 0
508 mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
509 mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
510 dev->caps.num_uars, dev->caps.reserved_uars,
511 dev->caps.uar_page_size * dev->caps.num_uars,
512 pci_resource_len(dev->pdev, 2));
513 mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
514 dev->caps.reserved_eqs);
515 mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
516 dev->caps.num_pds, dev->caps.reserved_pds,
517 dev->caps.slave_pd_shift, dev->caps.pd_base);
518#endif
519 return 0; 507 return 0;
520} 508}
521 509
@@ -810,9 +798,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
810 if (err) 798 if (err)
811 goto err_srq; 799 goto err_srq;
812 800
813 num_eqs = (mlx4_is_master(dev)) ? 801 num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
814 roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : 802 dev->caps.num_eqs;
815 dev->caps.num_eqs;
816 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 803 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
817 cmpt_base + 804 cmpt_base +
818 ((u64) (MLX4_CMPT_TYPE_EQ * 805 ((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +861,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
874 } 861 }
875 862
876 863
877 num_eqs = (mlx4_is_master(dev)) ? 864 num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
878 roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : 865 dev->caps.num_eqs;
879 dev->caps.num_eqs;
880 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 866 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
881 init_hca->eqc_base, dev_cap->eqc_entry_sz, 867 init_hca->eqc_base, dev_cap->eqc_entry_sz,
882 num_eqs, num_eqs, 0, 0); 868 num_eqs, num_eqs, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 86b6e5a2fabf..e5d20220762c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1039void mlx4_free_resource_tracker(struct mlx4_dev *dev, 1039void mlx4_free_resource_tracker(struct mlx4_dev *dev,
1040 enum mlx4_res_tracker_free_type type); 1040 enum mlx4_res_tracker_free_type type);
1041 1041
1042int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1043 struct mlx4_vhcr *vhcr,
1044 struct mlx4_cmd_mailbox *inbox,
1045 struct mlx4_cmd_mailbox *outbox,
1046 struct mlx4_cmd_info *cmd);
1042int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, 1047int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
1043 struct mlx4_vhcr *vhcr, 1048 struct mlx4_vhcr *vhcr,
1044 struct mlx4_cmd_mailbox *inbox, 1049 struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1054 struct mlx4_cmd_mailbox *inbox, 1059 struct mlx4_cmd_mailbox *inbox,
1055 struct mlx4_cmd_mailbox *outbox, 1060 struct mlx4_cmd_mailbox *outbox,
1056 struct mlx4_cmd_info *cmd); 1061 struct mlx4_cmd_info *cmd);
1062int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1063 struct mlx4_vhcr *vhcr,
1064 struct mlx4_cmd_mailbox *inbox,
1065 struct mlx4_cmd_mailbox *outbox,
1066 struct mlx4_cmd_info *cmd);
1057int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, 1067int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
1058 struct mlx4_vhcr *vhcr, 1068 struct mlx4_vhcr *vhcr,
1059 struct mlx4_cmd_mailbox *inbox, 1069 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 1fe2c7a8b40c..a8fb52992c64 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
697 if (slave != dev->caps.function) 697 if (slave != dev->caps.function)
698 memset(inbox->buf, 0, 256); 698 memset(inbox->buf, 0, 256);
699 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 699 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
700 *(u8 *) inbox->buf = !!reset_qkey_viols << 6; 700 *(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
701 ((__be32 *) inbox->buf)[2] = agg_cap_mask; 701 ((__be32 *) inbox->buf)[2] = agg_cap_mask;
702 } else { 702 } else {
703 ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; 703 ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
704 ((__be32 *) inbox->buf)[1] = agg_cap_mask; 704 ((__be32 *) inbox->buf)[1] = agg_cap_mask;
705 } 705 }
706 706
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 06e5adeb76f7..b83bc928d52a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
126 profile[MLX4_RES_AUXC].num = request->num_qp; 126 profile[MLX4_RES_AUXC].num = request->num_qp;
127 profile[MLX4_RES_SRQ].num = request->num_srq; 127 profile[MLX4_RES_SRQ].num = request->num_srq;
128 profile[MLX4_RES_CQ].num = request->num_cq; 128 profile[MLX4_RES_CQ].num = request->num_cq;
129 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); 129 profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ?
130 dev->phys_caps.num_phys_eqs :
131 min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
130 profile[MLX4_RES_DMPT].num = request->num_mpt; 132 profile[MLX4_RES_DMPT].num = request->num_mpt;
131 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 133 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
132 profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); 134 profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
215 init_hca->log_num_cqs = profile[i].log_num; 217 init_hca->log_num_cqs = profile[i].log_num;
216 break; 218 break;
217 case MLX4_RES_EQ: 219 case MLX4_RES_EQ:
218 dev->caps.num_eqs = profile[i].num; 220 dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
221 MAX_MSIX));
219 init_hca->eqc_base = profile[i].start; 222 init_hca->eqc_base = profile[i].start;
220 init_hca->log_num_eqs = profile[i].log_num; 223 init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
221 break; 224 break;
222 case MLX4_RES_DMPT: 225 case MLX4_RES_DMPT:
223 dev->caps.num_mpts = profile[i].num; 226 dev->caps.num_mpts = profile[i].num;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666fcffd7..083d6715335c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
946 /* Update stats */ 946 /* Update stats */
947 ndev->stats.tx_packets++; 947 ndev->stats.tx_packets++;
948 ndev->stats.tx_bytes += skb->len; 948 ndev->stats.tx_bytes += skb->len;
949
950 /* Free buffer */
951 dev_kfree_skb_irq(skb);
952 } 949 }
950 dev_kfree_skb_irq(skb);
953 951
954 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); 952 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
955 } 953 }
956 954
957 if (netif_queue_stopped(ndev)) 955 if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
958 netif_wake_queue(ndev); 956 if (netif_queue_stopped(ndev))
957 netif_wake_queue(ndev);
958 }
959} 959}
960 960
961static int __lpc_handle_recv(struct net_device *ndev, int budget) 961static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
1320 .ndo_set_rx_mode = lpc_eth_set_multicast_list, 1320 .ndo_set_rx_mode = lpc_eth_set_multicast_list,
1321 .ndo_do_ioctl = lpc_eth_ioctl, 1321 .ndo_do_ioctl = lpc_eth_ioctl,
1322 .ndo_set_mac_address = lpc_set_mac_address, 1322 .ndo_set_mac_address = lpc_set_mac_address,
1323 .ndo_change_mtu = eth_change_mtu,
1323}; 1324};
1324 1325
1325static int lpc_eth_drv_probe(struct platform_device *pdev) 1326static int lpc_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4de73643fec6..d1827e887f4e 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1096,20 +1096,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1096 if (err) { 1096 if (err) {
1097 dev_err(&pdev->dev, "32-bit PCI DMA addresses" 1097 dev_err(&pdev->dev, "32-bit PCI DMA addresses"
1098 "not supported by the card\n"); 1098 "not supported by the card\n");
1099 goto err_out; 1099 goto err_out_disable_dev;
1100 } 1100 }
1101 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1101 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1102 if (err) { 1102 if (err) {
1103 dev_err(&pdev->dev, "32-bit PCI DMA addresses" 1103 dev_err(&pdev->dev, "32-bit PCI DMA addresses"
1104 "not supported by the card\n"); 1104 "not supported by the card\n");
1105 goto err_out; 1105 goto err_out_disable_dev;
1106 } 1106 }
1107 1107
1108 /* IO Size check */ 1108 /* IO Size check */
1109 if (pci_resource_len(pdev, bar) < io_size) { 1109 if (pci_resource_len(pdev, bar) < io_size) {
1110 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); 1110 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
1111 err = -EIO; 1111 err = -EIO;
1112 goto err_out; 1112 goto err_out_disable_dev;
1113 } 1113 }
1114 1114
1115 pci_set_master(pdev); 1115 pci_set_master(pdev);
@@ -1117,7 +1117,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1117 dev = alloc_etherdev(sizeof(struct r6040_private)); 1117 dev = alloc_etherdev(sizeof(struct r6040_private));
1118 if (!dev) { 1118 if (!dev) {
1119 err = -ENOMEM; 1119 err = -ENOMEM;
1120 goto err_out; 1120 goto err_out_disable_dev;
1121 } 1121 }
1122 SET_NETDEV_DEV(dev, &pdev->dev); 1122 SET_NETDEV_DEV(dev, &pdev->dev);
1123 lp = netdev_priv(dev); 1123 lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@ err_out_mdio_irq:
1233err_out_mdio: 1233err_out_mdio:
1234 mdiobus_free(lp->mii_bus); 1234 mdiobus_free(lp->mii_bus);
1235err_out_unmap: 1235err_out_unmap:
1236 netif_napi_del(&lp->napi);
1237 pci_set_drvdata(pdev, NULL);
1236 pci_iounmap(pdev, ioaddr); 1238 pci_iounmap(pdev, ioaddr);
1237err_out_free_res: 1239err_out_free_res:
1238 pci_release_regions(pdev); 1240 pci_release_regions(pdev);
1239err_out_free_dev: 1241err_out_free_dev:
1240 free_netdev(dev); 1242 free_netdev(dev);
1243err_out_disable_dev:
1244 pci_disable_device(pdev);
1241err_out: 1245err_out:
1242 return err; 1246 return err;
1243} 1247}
@@ -1251,6 +1255,9 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
1251 mdiobus_unregister(lp->mii_bus); 1255 mdiobus_unregister(lp->mii_bus);
1252 kfree(lp->mii_bus->irq); 1256 kfree(lp->mii_bus->irq);
1253 mdiobus_free(lp->mii_bus); 1257 mdiobus_free(lp->mii_bus);
1258 netif_napi_del(&lp->napi);
1259 pci_set_drvdata(pdev, NULL);
1260 pci_iounmap(pdev, lp->base);
1254 pci_release_regions(pdev); 1261 pci_release_regions(pdev);
1255 free_netdev(dev); 1262 free_netdev(dev);
1256 pci_disable_device(pdev); 1263 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5eef290997f9..995d0cfc4c06 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); 979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); 980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
981 981
982 cpw32_f(HiTxRingAddr, 0);
983 cpw32_f(HiTxRingAddr + 4, 0);
984
985 ring_dma = cp->ring_dma;
986 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
987 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
988
989 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
990 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
991 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
992
982 cp_start_hw(cp); 993 cp_start_hw(cp);
983 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ 994 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
984 995
@@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp)
992 1003
993 cpw8(Config5, cpr8(Config5) & PMEStatus); 1004 cpw8(Config5, cpr8(Config5) & PMEStatus);
994 1005
995 cpw32_f(HiTxRingAddr, 0);
996 cpw32_f(HiTxRingAddr + 4, 0);
997
998 ring_dma = cp->ring_dma;
999 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1000 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1001
1002 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1003 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1004 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1005
1006 cpw16(MultiIntr, 0); 1006 cpw16(MultiIntr, 0);
1007 1007
1008 cpw8_f(Cfg9346, Cfg9346_Lock); 1008 cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1636 1636
1637static void eeprom_cmd_end(void __iomem *ee_addr) 1637static void eeprom_cmd_end(void __iomem *ee_addr)
1638{ 1638{
1639 writeb (~EE_CS, ee_addr); 1639 writeb(0, ee_addr);
1640 eeprom_delay (); 1640 eeprom_delay ();
1641} 1641}
1642 1642
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 03df076ed596..1d83565cc6af 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
1173 } 1173 }
1174 1174
1175 /* Terminate the EEPROM access. */ 1175 /* Terminate the EEPROM access. */
1176 RTL_W8 (Cfg9346, ~EE_CS); 1176 RTL_W8(Cfg9346, 0);
1177 eeprom_delay (); 1177 eeprom_delay ();
1178 1178
1179 return retval; 1179 return retval;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 00b4f56a671c..7260aa79466a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5889,11 +5889,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
5889 if (status & LinkChg) 5889 if (status & LinkChg)
5890 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); 5890 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
5891 5891
5892 napi_disable(&tp->napi); 5892 rtl_irq_enable_all(tp);
5893 rtl_irq_disable(tp);
5894
5895 napi_enable(&tp->napi);
5896 napi_schedule(&tp->napi);
5897} 5893}
5898 5894
5899static void rtl_task(struct work_struct *work) 5895static void rtl_task(struct work_struct *work)
@@ -6345,6 +6341,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev)
6345 6341
6346 cancel_work_sync(&tp->wk.work); 6342 cancel_work_sync(&tp->wk.work);
6347 6343
6344 netif_napi_del(&tp->napi);
6345
6348 unregister_netdev(dev); 6346 unregister_netdev(dev);
6349 6347
6350 rtl_release_firmware(tp); 6348 rtl_release_firmware(tp);
@@ -6668,6 +6666,7 @@ out:
6668 return rc; 6666 return rc;
6669 6667
6670err_out_msi_4: 6668err_out_msi_4:
6669 netif_napi_del(&tp->napi);
6671 rtl_disable_msi(pdev, tp); 6670 rtl_disable_msi(pdev, tp);
6672 iounmap(ioaddr); 6671 iounmap(ioaddr);
6673err_out_free_res_3: 6672err_out_free_res_3:
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index be3c22179161..667169b82526 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1101,8 +1101,12 @@ static int sh_eth_rx(struct net_device *ndev)
1101 1101
1102 /* Restart Rx engine if stopped. */ 1102 /* Restart Rx engine if stopped. */
1103 /* If we don't need to check status, don't. -KDU */ 1103 /* If we don't need to check status, don't. -KDU */
1104 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) 1104 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1105 /* fix the values for the next receiving */
1106 mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
1107 sh_eth_read(ndev, RDLAR)) >> 4;
1105 sh_eth_write(ndev, EDRRR_R, EDRRR); 1108 sh_eth_write(ndev, EDRRR_R, EDRRR);
1109 }
1106 1110
1107 return 0; 1111 return 0;
1108} 1112}
@@ -1199,8 +1203,6 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1199 /* Receive Descriptor Empty int */ 1203 /* Receive Descriptor Empty int */
1200 ndev->stats.rx_over_errors++; 1204 ndev->stats.rx_over_errors++;
1201 1205
1202 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
1203 sh_eth_write(ndev, EDRRR_R, EDRRR);
1204 if (netif_msg_rx_err(mdp)) 1206 if (netif_msg_rx_err(mdp))
1205 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 1207 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
1206 } 1208 }
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index dab9c6f671ec..1466e5d2af44 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2390,11 +2390,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2390 2390
2391 retval = smsc911x_request_resources(pdev); 2391 retval = smsc911x_request_resources(pdev);
2392 if (retval) 2392 if (retval)
2393 goto out_return_resources; 2393 goto out_request_resources_fail;
2394 2394
2395 retval = smsc911x_enable_resources(pdev); 2395 retval = smsc911x_enable_resources(pdev);
2396 if (retval) 2396 if (retval)
2397 goto out_disable_resources; 2397 goto out_enable_resources_fail;
2398 2398
2399 if (pdata->ioaddr == NULL) { 2399 if (pdata->ioaddr == NULL) {
2400 SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); 2400 SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2501,8 +2501,9 @@ out_free_irq:
2501 free_irq(dev->irq, dev); 2501 free_irq(dev->irq, dev);
2502out_disable_resources: 2502out_disable_resources:
2503 (void)smsc911x_disable_resources(pdev); 2503 (void)smsc911x_disable_resources(pdev);
2504out_return_resources: 2504out_enable_resources_fail:
2505 smsc911x_free_resources(pdev); 2505 smsc911x_free_resources(pdev);
2506out_request_resources_fail:
2506 platform_set_drvdata(pdev, NULL); 2507 platform_set_drvdata(pdev, NULL);
2507 iounmap(pdata->ioaddr); 2508 iounmap(pdata->ioaddr);
2508 free_netdev(dev); 2509 free_netdev(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 036428348faa..9f448279e12a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,7 +13,7 @@ config STMMAC_ETH
13if STMMAC_ETH 13if STMMAC_ETH
14 14
15config STMMAC_PLATFORM 15config STMMAC_PLATFORM
16 tristate "STMMAC platform bus support" 16 bool "STMMAC Platform bus support"
17 depends on STMMAC_ETH 17 depends on STMMAC_ETH
18 default y 18 default y
19 ---help--- 19 ---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
26 If unsure, say N. 26 If unsure, say N.
27 27
28config STMMAC_PCI 28config STMMAC_PCI
29 tristate "STMMAC support on PCI bus (EXPERIMENTAL)" 29 bool "STMMAC PCI bus support (EXPERIMENTAL)"
30 depends on STMMAC_ETH && PCI && EXPERIMENTAL 30 depends on STMMAC_ETH && PCI && EXPERIMENTAL
31 ---help--- 31 ---help---
32 This is to select the Synopsys DWMAC available on PCI devices, 32 This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060ee9de..dc20c56efc9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/stmmac.h> 27#include <linux/stmmac.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/pci.h>
29#include "common.h" 30#include "common.h"
30#ifdef CONFIG_STMMAC_TIMER 31#ifdef CONFIG_STMMAC_TIMER
31#include "stmmac_timer.h" 32#include "stmmac_timer.h"
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
95extern void stmmac_set_ethtool_ops(struct net_device *netdev); 96extern void stmmac_set_ethtool_ops(struct net_device *netdev);
96extern const struct stmmac_desc_ops enh_desc_ops; 97extern const struct stmmac_desc_ops enh_desc_ops;
97extern const struct stmmac_desc_ops ndesc_ops; 98extern const struct stmmac_desc_ops ndesc_ops;
98
99int stmmac_freeze(struct net_device *ndev); 99int stmmac_freeze(struct net_device *ndev);
100int stmmac_restore(struct net_device *ndev); 100int stmmac_restore(struct net_device *ndev);
101int stmmac_resume(struct net_device *ndev); 101int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
109static inline int stmmac_clk_enable(struct stmmac_priv *priv) 109static inline int stmmac_clk_enable(struct stmmac_priv *priv)
110{ 110{
111 if (!IS_ERR(priv->stmmac_clk)) 111 if (!IS_ERR(priv->stmmac_clk))
112 return clk_enable(priv->stmmac_clk); 112 return clk_prepare_enable(priv->stmmac_clk);
113 113
114 return 0; 114 return 0;
115} 115}
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
119 if (IS_ERR(priv->stmmac_clk)) 119 if (IS_ERR(priv->stmmac_clk))
120 return; 120 return;
121 121
122 clk_disable(priv->stmmac_clk); 122 clk_disable_unprepare(priv->stmmac_clk);
123} 123}
124static inline int stmmac_clk_get(struct stmmac_priv *priv) 124static inline int stmmac_clk_get(struct stmmac_priv *priv)
125{ 125{
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
143 return 0; 143 return 0;
144} 144}
145#endif /* CONFIG_HAVE_CLK */ 145#endif /* CONFIG_HAVE_CLK */
146
147
148#ifdef CONFIG_STMMAC_PLATFORM
149extern struct platform_driver stmmac_pltfr_driver;
150static inline int stmmac_register_platform(void)
151{
152 int err;
153
154 err = platform_driver_register(&stmmac_pltfr_driver);
155 if (err)
156 pr_err("stmmac: failed to register the platform driver\n");
157
158 return err;
159}
160static inline void stmmac_unregister_platform(void)
161{
162 platform_driver_register(&stmmac_pltfr_driver);
163}
164#else
165static inline int stmmac_register_platform(void)
166{
167 pr_debug("stmmac: do not register the platf driver\n");
168
169 return -EINVAL;
170}
171static inline void stmmac_unregister_platform(void)
172{
173}
174#endif /* CONFIG_STMMAC_PLATFORM */
175
176#ifdef CONFIG_STMMAC_PCI
177extern struct pci_driver stmmac_pci_driver;
178static inline int stmmac_register_pci(void)
179{
180 int err;
181
182 err = pci_register_driver(&stmmac_pci_driver);
183 if (err)
184 pr_err("stmmac: failed to register the PCI driver\n");
185
186 return err;
187}
188static inline void stmmac_unregister_pci(void)
189{
190 pci_unregister_driver(&stmmac_pci_driver);
191}
192#else
193static inline int stmmac_register_pci(void)
194{
195 pr_debug("stmmac: do not register the PCI driver\n");
196
197 return -EINVAL;
198}
199static inline void stmmac_unregister_pci(void)
200{
201}
202#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 70966330f44e..51b3b68528ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
833 833
834/** 834/**
835 * stmmac_selec_desc_mode 835 * stmmac_selec_desc_mode
836 * @dev : device pointer 836 * @priv : private structure
837 * Description: select the Enhanced/Alternate or Normal descriptors */ 837 * Description: select the Enhanced/Alternate or Normal descriptors
838 */
838static void stmmac_selec_desc_mode(struct stmmac_priv *priv) 839static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
839{ 840{
840 if (priv->plat->enh_desc) { 841 if (priv->plat->enh_desc) {
@@ -1861,6 +1862,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1861/** 1862/**
1862 * stmmac_dvr_probe 1863 * stmmac_dvr_probe
1863 * @device: device pointer 1864 * @device: device pointer
1865 * @plat_dat: platform data pointer
1866 * @addr: iobase memory address
1864 * Description: this is the main probe function used to 1867 * Description: this is the main probe function used to
1865 * call the alloc_etherdev, allocate the priv structure. 1868 * call the alloc_etherdev, allocate the priv structure.
1866 */ 1869 */
@@ -2090,6 +2093,34 @@ int stmmac_restore(struct net_device *ndev)
2090} 2093}
2091#endif /* CONFIG_PM */ 2094#endif /* CONFIG_PM */
2092 2095
2096/* Driver can be configured w/ and w/ both PCI and Platf drivers
2097 * depending on the configuration selected.
2098 */
2099static int __init stmmac_init(void)
2100{
2101 int err_plt = 0;
2102 int err_pci = 0;
2103
2104 err_plt = stmmac_register_platform();
2105 err_pci = stmmac_register_pci();
2106
2107 if ((err_pci) && (err_plt)) {
2108 pr_err("stmmac: driver registration failed\n");
2109 return -EINVAL;
2110 }
2111
2112 return 0;
2113}
2114
2115static void __exit stmmac_exit(void)
2116{
2117 stmmac_unregister_platform();
2118 stmmac_unregister_pci();
2119}
2120
2121module_init(stmmac_init);
2122module_exit(stmmac_exit);
2123
2093#ifndef MODULE 2124#ifndef MODULE
2094static int __init stmmac_cmdline_opt(char *str) 2125static int __init stmmac_cmdline_opt(char *str)
2095{ 2126{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab5303e9c..cf826e6b6aa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
179 179
180MODULE_DEVICE_TABLE(pci, stmmac_id_table); 180MODULE_DEVICE_TABLE(pci, stmmac_id_table);
181 181
182static struct pci_driver stmmac_driver = { 182struct pci_driver stmmac_pci_driver = {
183 .name = STMMAC_RESOURCE_NAME, 183 .name = STMMAC_RESOURCE_NAME,
184 .id_table = stmmac_id_table, 184 .id_table = stmmac_id_table,
185 .probe = stmmac_pci_probe, 185 .probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
190#endif 190#endif
191}; 191};
192 192
193/**
194 * stmmac_init_module - Entry point for the driver
195 * Description: This function is the entry point for the driver.
196 */
197static int __init stmmac_init_module(void)
198{
199 int ret;
200
201 ret = pci_register_driver(&stmmac_driver);
202 if (ret < 0)
203 pr_err("%s: ERROR: driver registration failed\n", __func__);
204
205 return ret;
206}
207
208/**
209 * stmmac_cleanup_module - Cleanup routine for the driver
210 * Description: This function is the cleanup routine for the driver.
211 */
212static void __exit stmmac_cleanup_module(void)
213{
214 pci_unregister_driver(&stmmac_driver);
215}
216
217module_init(stmmac_init_module);
218module_exit(stmmac_cleanup_module);
219
220MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); 193MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
221MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); 194MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
222MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 195MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3dd8f0803808..680d2b8dfe27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
255}; 255};
256MODULE_DEVICE_TABLE(of, stmmac_dt_ids); 256MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
257 257
258static struct platform_driver stmmac_driver = { 258struct platform_driver stmmac_pltfr_driver = {
259 .probe = stmmac_pltfr_probe, 259 .probe = stmmac_pltfr_probe,
260 .remove = stmmac_pltfr_remove, 260 .remove = stmmac_pltfr_remove,
261 .driver = { 261 .driver = {
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = {
266 }, 266 },
267}; 267};
268 268
269module_platform_driver(stmmac_driver);
270
271MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); 269MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
272MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 270MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
273MODULE_LICENSE("GPL"); 271MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cce2a2c..8c726b7004d3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3598static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 3598static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3599{ 3599{
3600 struct netdev_queue *txq; 3600 struct netdev_queue *txq;
3601 unsigned int tx_bytes;
3602 u16 pkt_cnt, tmp; 3601 u16 pkt_cnt, tmp;
3603 int cons, index; 3602 int cons, index;
3604 u64 cs; 3603 u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3621 netif_printk(np, tx_done, KERN_DEBUG, np->dev, 3620 netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3622 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); 3621 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3623 3622
3624 tx_bytes = 0; 3623 while (pkt_cnt--)
3625 tmp = pkt_cnt;
3626 while (tmp--) {
3627 tx_bytes += rp->tx_buffs[cons].skb->len;
3628 cons = release_tx_packet(np, rp, cons); 3624 cons = release_tx_packet(np, rp, cons);
3629 }
3630 3625
3631 rp->cons = cons; 3626 rp->cons = cons;
3632 smp_mb(); 3627 smp_mb();
3633 3628
3634 netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
3635
3636out: 3629out:
3637 if (unlikely(netif_tx_queue_stopped(txq) && 3630 if (unlikely(netif_tx_queue_stopped(txq) &&
3638 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { 3631 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
4333 struct tx_ring_info *rp = &np->tx_rings[i]; 4326 struct tx_ring_info *rp = &np->tx_rings[i];
4334 4327
4335 niu_free_tx_ring_info(np, rp); 4328 niu_free_tx_ring_info(np, rp);
4336 netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
4337 } 4329 }
4338 kfree(np->tx_rings); 4330 kfree(np->tx_rings);
4339 np->tx_rings = NULL; 4331 np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6739 prod = NEXT_TX(rp, prod); 6731 prod = NEXT_TX(rp, prod);
6740 } 6732 }
6741 6733
6742 netdev_tx_sent_queue(txq, skb->len);
6743
6744 if (prod < rp->prod) 6734 if (prod < rp->prod)
6745 rp->wrap_bit ^= TX_RING_KICK_WRAP; 6735 rp->wrap_bit ^= TX_RING_KICK_WRAP;
6746 rp->prod = prod; 6736 rp->prod = prod;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f86bca..098b1c42b393 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -7,6 +7,8 @@ config TILE_NET
7 depends on TILE 7 depends on TILE
8 default y 8 default y
9 select CRC32 9 select CRC32
10 select TILE_GXIO_MPIPE if TILEGX
11 select HIGH_RES_TIMERS if TILEGX
10 ---help--- 12 ---help---
11 This is a standard Linux network device driver for the 13 This is a standard Linux network device driver for the
12 on-chip Tilera Gigabit Ethernet and XAUI interfaces. 14 on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f142cab4..0ef9eefd3211 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_TILE_NET) += tile_net.o 5obj-$(CONFIG_TILE_NET) += tile_net.o
6ifdef CONFIG_TILEGX 6ifdef CONFIG_TILEGX
7tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o 7tile_net-y := tilegx.o
8else 8else
9tile_net-objs := tilepro.o 9tile_net-y := tilepro.o
10endif 10endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 000000000000..83b4b388ad49
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1898 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/moduleparam.h>
18#include <linux/sched.h>
19#include <linux/kernel.h> /* printk() */
20#include <linux/slab.h> /* kmalloc() */
21#include <linux/errno.h> /* error codes */
22#include <linux/types.h> /* size_t */
23#include <linux/interrupt.h>
24#include <linux/in.h>
25#include <linux/irq.h>
26#include <linux/netdevice.h> /* struct device, and other headers */
27#include <linux/etherdevice.h> /* eth_type_trans */
28#include <linux/skbuff.h>
29#include <linux/ioctl.h>
30#include <linux/cdev.h>
31#include <linux/hugetlb.h>
32#include <linux/in6.h>
33#include <linux/timer.h>
34#include <linux/hrtimer.h>
35#include <linux/ktime.h>
36#include <linux/io.h>
37#include <linux/ctype.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40
41#include <asm/checksum.h>
42#include <asm/homecache.h>
43#include <gxio/mpipe.h>
44#include <arch/sim.h>
45
46/* Default transmit lockup timeout period, in jiffies. */
47#define TILE_NET_TIMEOUT (5 * HZ)
48
49/* The maximum number of distinct channels (idesc.channel is 5 bits). */
50#define TILE_NET_CHANNELS 32
51
52/* Maximum number of idescs to handle per "poll". */
53#define TILE_NET_BATCH 128
54
55/* Maximum number of packets to handle per "poll". */
56#define TILE_NET_WEIGHT 64
57
58/* Number of entries in each iqueue. */
59#define IQUEUE_ENTRIES 512
60
61/* Number of entries in each equeue. */
62#define EQUEUE_ENTRIES 2048
63
64/* Total header bytes per equeue slot. Must be big enough for 2 bytes
65 * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
66 * 60 bytes of actual TCP header. We round up to align to cache lines.
67 */
68#define HEADER_BYTES 128
69
70/* Maximum completions per cpu per device (must be a power of two).
71 * ISSUE: What is the right number here? If this is too small, then
72 * egress might block waiting for free space in a completions array.
73 * ISSUE: At the least, allocate these only for initialized echannels.
74 */
75#define TILE_NET_MAX_COMPS 64
76
77#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
78
79/* Size of completions data to allocate.
80 * ISSUE: Probably more than needed since we don't use all the channels.
81 */
82#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
83
84/* Size of NotifRing data to allocate. */
85#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
86
87/* Timeout to wake the per-device TX timer after we stop the queue.
88 * We don't want the timeout too short (adds overhead, and might end
89 * up causing stop/wake/stop/wake cycles) or too long (affects performance).
90 * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
91 */
92#define TX_TIMER_DELAY_USEC 30
93
94/* Timeout to wake the per-cpu egress timer to free completions. */
95#define EGRESS_TIMER_DELAY_USEC 1000
96
97MODULE_AUTHOR("Tilera Corporation");
98MODULE_LICENSE("GPL");
99
100/* A "packet fragment" (a chunk of memory). */
101struct frag {
102 void *buf;
103 size_t length;
104};
105
106/* A single completion. */
107struct tile_net_comp {
108 /* The "complete_count" when the completion will be complete. */
109 s64 when;
110 /* The buffer to be freed when the completion is complete. */
111 struct sk_buff *skb;
112};
113
114/* The completions for a given cpu and echannel. */
115struct tile_net_comps {
116 /* The completions. */
117 struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
118 /* The number of completions used. */
119 unsigned long comp_next;
120 /* The number of completions freed. */
121 unsigned long comp_last;
122};
123
124/* The transmit wake timer for a given cpu and echannel. */
125struct tile_net_tx_wake {
126 struct hrtimer timer;
127 struct net_device *dev;
128};
129
130/* Info for a specific cpu. */
131struct tile_net_info {
132 /* The NAPI struct. */
133 struct napi_struct napi;
134 /* Packet queue. */
135 gxio_mpipe_iqueue_t iqueue;
136 /* Our cpu. */
137 int my_cpu;
138 /* True if iqueue is valid. */
139 bool has_iqueue;
140 /* NAPI flags. */
141 bool napi_added;
142 bool napi_enabled;
143 /* Number of small sk_buffs which must still be provided. */
144 unsigned int num_needed_small_buffers;
145 /* Number of large sk_buffs which must still be provided. */
146 unsigned int num_needed_large_buffers;
147 /* A timer for handling egress completions. */
148 struct hrtimer egress_timer;
149 /* True if "egress_timer" is scheduled. */
150 bool egress_timer_scheduled;
151 /* Comps for each egress channel. */
152 struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
153 /* Transmit wake timer for each egress channel. */
154 struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
155};
156
157/* Info for egress on a particular egress channel. */
158struct tile_net_egress {
159 /* The "equeue". */
160 gxio_mpipe_equeue_t *equeue;
161 /* The headers for TSO. */
162 unsigned char *headers;
163};
164
165/* Info for a specific device. */
166struct tile_net_priv {
167 /* Our network device. */
168 struct net_device *dev;
169 /* The primary link. */
170 gxio_mpipe_link_t link;
171 /* The primary channel, if open, else -1. */
172 int channel;
173 /* The "loopify" egress link, if needed. */
174 gxio_mpipe_link_t loopify_link;
175 /* The "loopify" egress channel, if open, else -1. */
176 int loopify_channel;
177 /* The egress channel (channel or loopify_channel). */
178 int echannel;
179 /* Total stats. */
180 struct net_device_stats stats;
181};
182
183/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
184static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
185
186/* Devices currently associated with each channel.
187 * NOTE: The array entry can become NULL after ifconfig down, but
188 * we do not free the underlying net_device structures, so it is
189 * safe to use a pointer after reading it from this array.
190 */
191static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
192
193/* A mutex for "tile_net_devs_for_channel". */
194static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
195
196/* The per-cpu info. */
197static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
198
199/* The "context" for all devices. */
200static gxio_mpipe_context_t context;
201
202/* Buffer sizes and mpipe enum codes for buffer stacks.
203 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
204 */
205#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
206#define BUFFER_SIZE_SMALL 128
207#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
208#define BUFFER_SIZE_LARGE 1664
209
210/* The small/large "buffer stacks". */
211static int small_buffer_stack = -1;
212static int large_buffer_stack = -1;
213
214/* Amount of memory allocated for each buffer stack. */
215static size_t buffer_stack_size;
216
217/* The actual memory allocated for the buffer stacks. */
218static void *small_buffer_stack_va;
219static void *large_buffer_stack_va;
220
221/* The buckets. */
222static int first_bucket = -1;
223static int num_buckets = 1;
224
225/* The ingress irq. */
226static int ingress_irq = -1;
227
228/* Text value of tile_net.cpus if passed as a module parameter. */
229static char *network_cpus_string;
230
231/* The actual cpus in "network_cpus". */
232static struct cpumask network_cpus_map;
233
234/* If "loopify=LINK" was specified, this is "LINK". */
235static char *loopify_link_name;
236
237/* If "tile_net.custom" was specified, this is non-NULL. */
238static char *custom_str;
239
240/* The "tile_net.cpus" argument specifies the cpus that are dedicated
241 * to handle ingress packets.
242 *
243 * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
244 * m, n, x, y are integer numbers that represent the cpus that can be
245 * neither a dedicated cpu nor a dataplane cpu.
246 */
247static bool network_cpus_init(void)
248{
249 char buf[1024];
250 int rc;
251
252 if (network_cpus_string == NULL)
253 return false;
254
255 rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
256 if (rc != 0) {
257 pr_warn("tile_net.cpus=%s: malformed cpu list\n",
258 network_cpus_string);
259 return false;
260 }
261
262 /* Remove dedicated cpus. */
263 cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
264
265 if (cpumask_empty(&network_cpus_map)) {
266 pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
267 network_cpus_string);
268 return false;
269 }
270
271 cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
272 pr_info("Linux network CPUs: %s\n", buf);
273 return true;
274}
275
276module_param_named(cpus, network_cpus_string, charp, 0444);
277MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
278
279/* The "tile_net.loopify=LINK" argument causes the named device to
280 * actually use "loop0" for ingress, and "loop1" for egress. This
281 * allows an app to sit between the actual link and linux, passing
282 * (some) packets along to linux, and forwarding (some) packets sent
283 * out by linux.
284 */
285module_param_named(loopify, loopify_link_name, charp, 0444);
286MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
287
288/* The "tile_net.custom" argument causes us to ignore the "conventional"
289 * classifier metadata, in particular, the "l2_offset".
290 */
291module_param_named(custom, custom_str, charp, 0444);
292MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
293
294/* Atomically update a statistics field.
295 * Note that on TILE-Gx, this operation is fire-and-forget on the
296 * issuing core (single-cycle dispatch) and takes only a few cycles
297 * longer than a regular store when the request reaches the home cache.
298 * No expensive bus management overhead is required.
299 */
300static void tile_net_stats_add(unsigned long value, unsigned long *field)
301{
302 BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
303 atomic_long_add(value, (atomic_long_t *)field);
304}
305
306/* Allocate and push a buffer. */
307static bool tile_net_provide_buffer(bool small)
308{
309 int stack = small ? small_buffer_stack : large_buffer_stack;
310 const unsigned long buffer_alignment = 128;
311 struct sk_buff *skb;
312 int len;
313
314 len = sizeof(struct sk_buff **) + buffer_alignment;
315 len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
316 skb = dev_alloc_skb(len);
317 if (skb == NULL)
318 return false;
319
320 /* Make room for a back-pointer to 'skb' and guarantee alignment. */
321 skb_reserve(skb, sizeof(struct sk_buff **));
322 skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
323
324 /* Save a back-pointer to 'skb'. */
325 *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
326
327 /* Make sure "skb" and the back-pointer have been flushed. */
328 wmb();
329
330 gxio_mpipe_push_buffer(&context, stack,
331 (void *)va_to_tile_io_addr(skb->data));
332
333 return true;
334}
335
336/* Convert a raw mpipe buffer to its matching skb pointer. */
337static struct sk_buff *mpipe_buf_to_skb(void *va)
338{
339 /* Acquire the associated "skb". */
340 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
341 struct sk_buff *skb = *skb_ptr;
342
343 /* Paranoia. */
344 if (skb->data != va) {
345 /* Panic here since there's a reasonable chance
346 * that corrupt buffers means generic memory
347 * corruption, with unpredictable system effects.
348 */
349 panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
350 va, skb, skb->data);
351 }
352
353 return skb;
354}
355
356static void tile_net_pop_all_buffers(int stack)
357{
358 for (;;) {
359 tile_io_addr_t addr =
360 (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
361 if (addr == 0)
362 break;
363 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
364 }
365}
366
367/* Provide linux buffers to mPIPE. */
368static void tile_net_provide_needed_buffers(void)
369{
370 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
371
372 while (info->num_needed_small_buffers != 0) {
373 if (!tile_net_provide_buffer(true))
374 goto oops;
375 info->num_needed_small_buffers--;
376 }
377
378 while (info->num_needed_large_buffers != 0) {
379 if (!tile_net_provide_buffer(false))
380 goto oops;
381 info->num_needed_large_buffers--;
382 }
383
384 return;
385
386oops:
387 /* Add a description to the page allocation failure dump. */
388 pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
389}
390
391static inline bool filter_packet(struct net_device *dev, void *buf)
392{
393 /* Filter packets received before we're up. */
394 if (dev == NULL || !(dev->flags & IFF_UP))
395 return true;
396
397 /* Filter out packets that aren't for us. */
398 if (!(dev->flags & IFF_PROMISC) &&
399 !is_multicast_ether_addr(buf) &&
400 compare_ether_addr(dev->dev_addr, buf) != 0)
401 return true;
402
403 return false;
404}
405
406static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
407 gxio_mpipe_idesc_t *idesc, unsigned long len)
408{
409 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
410 struct tile_net_priv *priv = netdev_priv(dev);
411
412 /* Encode the actual packet length. */
413 skb_put(skb, len);
414
415 skb->protocol = eth_type_trans(skb, dev);
416
417 /* Acknowledge "good" hardware checksums. */
418 if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
419 skb->ip_summed = CHECKSUM_UNNECESSARY;
420
421 netif_receive_skb(skb);
422
423 /* Update stats. */
424 tile_net_stats_add(1, &priv->stats.rx_packets);
425 tile_net_stats_add(len, &priv->stats.rx_bytes);
426
427 /* Need a new buffer. */
428 if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
429 info->num_needed_small_buffers++;
430 else
431 info->num_needed_large_buffers++;
432}
433
434/* Handle a packet. Return true if "processed", false if "filtered". */
435static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
436{
437 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
438 struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
439 uint8_t l2_offset;
440 void *va;
441 void *buf;
442 unsigned long len;
443 bool filter;
444
445 /* Drop packets for which no buffer was available.
446 * NOTE: This happens under heavy load.
447 */
448 if (idesc->be) {
449 struct tile_net_priv *priv = netdev_priv(dev);
450 tile_net_stats_add(1, &priv->stats.rx_dropped);
451 gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
452 if (net_ratelimit())
453 pr_info("Dropping packet (insufficient buffers).\n");
454 return false;
455 }
456
457 /* Get the "l2_offset", if allowed. */
458 l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
459
460 /* Get the raw buffer VA (includes "headroom"). */
461 va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
462
463 /* Get the actual packet start/length. */
464 buf = va + l2_offset;
465 len = idesc->l2_size - l2_offset;
466
467 /* Point "va" at the raw buffer. */
468 va -= NET_IP_ALIGN;
469
470 filter = filter_packet(dev, buf);
471 if (filter) {
472 gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
473 } else {
474 struct sk_buff *skb = mpipe_buf_to_skb(va);
475
476 /* Skip headroom, and any custom header. */
477 skb_reserve(skb, NET_IP_ALIGN + l2_offset);
478
479 tile_net_receive_skb(dev, skb, idesc, len);
480 }
481
482 gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
483 return !filter;
484}
485
486/* Handle some packets for the current CPU.
487 *
488 * This function handles up to TILE_NET_BATCH idescs per call.
489 *
490 * ISSUE: Since we do not provide new buffers until this function is
491 * complete, we must initially provide enough buffers for each network
492 * cpu to fill its iqueue and also its batched idescs.
493 *
494 * ISSUE: The "rotting packet" race condition occurs if a packet
495 * arrives after the queue appears to be empty, and before the
496 * hypervisor interrupt is re-enabled.
497 */
498static int tile_net_poll(struct napi_struct *napi, int budget)
499{
500 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
501 unsigned int work = 0;
502 gxio_mpipe_idesc_t *idesc;
503 int i, n;
504
505 /* Process packets. */
506 while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
507 for (i = 0; i < n; i++) {
508 if (i == TILE_NET_BATCH)
509 goto done;
510 if (tile_net_handle_packet(idesc + i)) {
511 if (++work >= budget)
512 goto done;
513 }
514 }
515 }
516
517 /* There are no packets left. */
518 napi_complete(&info->napi);
519
520 /* Re-enable hypervisor interrupts. */
521 gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
522
523 /* HACK: Avoid the "rotting packet" problem. */
524 if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
525 napi_schedule(&info->napi);
526
527 /* ISSUE: Handle completions? */
528
529done:
530 tile_net_provide_needed_buffers();
531
532 return work;
533}
534
535/* Handle an ingress interrupt on the current cpu. */
536static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
537{
538 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
539 napi_schedule(&info->napi);
540 return IRQ_HANDLED;
541}
542
543/* Free some completions. This must be called with interrupts blocked. */
544static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
545 struct tile_net_comps *comps,
546 int limit, bool force_update)
547{
548 int n = 0;
549 while (comps->comp_last < comps->comp_next) {
550 unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
551 struct tile_net_comp *comp = &comps->comp_queue[cid];
552 if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
553 force_update || n == 0))
554 break;
555 dev_kfree_skb_irq(comp->skb);
556 comps->comp_last++;
557 if (++n == limit)
558 break;
559 }
560 return n;
561}
562
563/* Add a completion. This must be called with interrupts blocked.
564 * tile_net_equeue_try_reserve() will have ensured a free completion entry.
565 */
566static void add_comp(gxio_mpipe_equeue_t *equeue,
567 struct tile_net_comps *comps,
568 uint64_t when, struct sk_buff *skb)
569{
570 int cid = comps->comp_next % TILE_NET_MAX_COMPS;
571 comps->comp_queue[cid].when = when;
572 comps->comp_queue[cid].skb = skb;
573 comps->comp_next++;
574}
575
576static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
577{
578 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
579 struct tile_net_priv *priv = netdev_priv(dev);
580
581 hrtimer_start(&info->tx_wake[priv->echannel].timer,
582 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
583 HRTIMER_MODE_REL_PINNED);
584}
585
586static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
587{
588 struct tile_net_tx_wake *tx_wake =
589 container_of(t, struct tile_net_tx_wake, timer);
590 netif_wake_subqueue(tx_wake->dev, smp_processor_id());
591 return HRTIMER_NORESTART;
592}
593
594/* Make sure the egress timer is scheduled. */
595static void tile_net_schedule_egress_timer(void)
596{
597 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
598
599 if (!info->egress_timer_scheduled) {
600 hrtimer_start(&info->egress_timer,
601 ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
602 HRTIMER_MODE_REL_PINNED);
603 info->egress_timer_scheduled = true;
604 }
605}
606
607/* The "function" for "info->egress_timer".
608 *
609 * This timer will reschedule itself as long as there are any pending
610 * completions expected for this tile.
611 */
612static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
613{
614 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
615 unsigned long irqflags;
616 bool pending = false;
617 int i;
618
619 local_irq_save(irqflags);
620
621 /* The timer is no longer scheduled. */
622 info->egress_timer_scheduled = false;
623
624 /* Free all possible comps for this tile. */
625 for (i = 0; i < TILE_NET_CHANNELS; i++) {
626 struct tile_net_egress *egress = &egress_for_echannel[i];
627 struct tile_net_comps *comps = info->comps_for_echannel[i];
628 if (comps->comp_last >= comps->comp_next)
629 continue;
630 tile_net_free_comps(egress->equeue, comps, -1, true);
631 pending = pending || (comps->comp_last < comps->comp_next);
632 }
633
634 /* Reschedule timer if needed. */
635 if (pending)
636 tile_net_schedule_egress_timer();
637
638 local_irq_restore(irqflags);
639
640 return HRTIMER_NORESTART;
641}
642
643/* Helper function for "tile_net_update()".
644 * "dev" (i.e. arg) is the device being brought up or down,
645 * or NULL if all devices are now down.
646 */
647static void tile_net_update_cpu(void *arg)
648{
649 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
650 struct net_device *dev = arg;
651
652 if (!info->has_iqueue)
653 return;
654
655 if (dev != NULL) {
656 if (!info->napi_added) {
657 netif_napi_add(dev, &info->napi,
658 tile_net_poll, TILE_NET_WEIGHT);
659 info->napi_added = true;
660 }
661 if (!info->napi_enabled) {
662 napi_enable(&info->napi);
663 info->napi_enabled = true;
664 }
665 enable_percpu_irq(ingress_irq, 0);
666 } else {
667 disable_percpu_irq(ingress_irq);
668 if (info->napi_enabled) {
669 napi_disable(&info->napi);
670 info->napi_enabled = false;
671 }
672 /* FIXME: Drain the iqueue. */
673 }
674}
675
676/* Helper function for tile_net_open() and tile_net_stop().
677 * Always called under tile_net_devs_for_channel_mutex.
678 */
679static int tile_net_update(struct net_device *dev)
680{
681 static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
682 bool saw_channel = false;
683 int channel;
684 int rc;
685 int cpu;
686
687 gxio_mpipe_rules_init(&rules, &context);
688
689 for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
690 if (tile_net_devs_for_channel[channel] == NULL)
691 continue;
692 if (!saw_channel) {
693 saw_channel = true;
694 gxio_mpipe_rules_begin(&rules, first_bucket,
695 num_buckets, NULL);
696 gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
697 }
698 gxio_mpipe_rules_add_channel(&rules, channel);
699 }
700
701 /* NOTE: This can fail if there is no classifier.
702 * ISSUE: Can anything else cause it to fail?
703 */
704 rc = gxio_mpipe_rules_commit(&rules);
705 if (rc != 0) {
706 netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
707 return -EIO;
708 }
709
710 /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
711 for_each_online_cpu(cpu)
712 smp_call_function_single(cpu, tile_net_update_cpu,
713 (saw_channel ? dev : NULL), 1);
714
715 /* HACK: Allow packets to flow in the simulator. */
716 if (saw_channel)
717 sim_enable_mpipe_links(0, -1);
718
719 return 0;
720}
721
/* Allocate and initialize mpipe buffer stacks, and register them in
 * the mPIPE TLBs, for both small and large packet sizes.
 * This routine supports tile_net_init_mpipe(), below.
 *
 * On failure, partially-created hypervisor resources are left for
 * tile_net_init_mpipe_fail() to release (via gxio_mpipe_destroy()).
 */
static int init_buffer_stacks(struct net_device *dev, int num_buffers)
{
	/* PTE used only to tell mPIPE the caching mode of stack memory. */
	pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
	int rc;

	/* Compute stack bytes; we round up to 64KB and then use
	 * alloc_pages() so we get the required 64KB alignment as well.
	 */
	buffer_stack_size =
		ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
		      64 * 1024);

	/* Allocate two buffer stack indices. */
	rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
			   rc);
		return rc;
	}
	/* The two indices are consecutive: small first, then large. */
	small_buffer_stack = rc;
	large_buffer_stack = rc + 1;

	/* Allocate the small memory stack. */
	small_buffer_stack_va =
		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
	if (small_buffer_stack_va == NULL) {
		netdev_err(dev,
			   "Could not alloc %zd bytes for buffer stacks\n",
			   buffer_stack_size);
		return -ENOMEM;
	}
	rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
					  BUFFER_SIZE_SMALL_ENUM,
					  small_buffer_stack_va,
					  buffer_stack_size, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
		return rc;
	}
	/* NOTE(review): the error messages below say
	 * "gxio_mpipe_register_buffer_memory" but the call is
	 * gxio_mpipe_register_client_memory -- possibly worth aligning.
	 */
	rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
					       hash_pte, 0);
	if (rc != 0) {
		netdev_err(dev,
			   "gxio_mpipe_register_buffer_memory failed: %d\n",
			   rc);
		return rc;
	}

	/* Allocate the large buffer stack. */
	large_buffer_stack_va =
		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
	if (large_buffer_stack_va == NULL) {
		netdev_err(dev,
			   "Could not alloc %zd bytes for buffer stacks\n",
			   buffer_stack_size);
		return -ENOMEM;
	}
	rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
					  BUFFER_SIZE_LARGE_ENUM,
					  large_buffer_stack_va,
					  buffer_stack_size, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
			   rc);
		return rc;
	}
	rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
					       hash_pte, 0);
	if (rc != 0) {
		netdev_err(dev,
			   "gxio_mpipe_register_buffer_memory failed: %d\n",
			   rc);
		return rc;
	}

	return 0;
}
803
/* Allocate per-cpu resources (memory for completions and idescs).
 * This routine supports tile_net_init_mpipe(), below.
 *
 * "ring" is the next free NotifRing index; returns the (possibly
 * incremented) next free index on success, or a negative errno.
 * Partial allocations are cleaned up by tile_net_init_mpipe_fail().
 */
static int alloc_percpu_mpipe_resources(struct net_device *dev,
					int cpu, int ring)
{
	struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
	int order, i, rc;
	struct page *page;
	void *addr;

	/* Allocate the "comps": one tile_net_comps per egress channel,
	 * homed on this cpu's cache.
	 */
	order = get_order(COMPS_SIZE);
	page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
	if (page == NULL) {
		netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
			   COMPS_SIZE);
		return -ENOMEM;
	}
	addr = pfn_to_kaddr(page_to_pfn(page));
	memset(addr, 0, COMPS_SIZE);
	for (i = 0; i < TILE_NET_CHANNELS; i++)
		info->comps_for_echannel[i] =
			addr + i * sizeof(struct tile_net_comps);

	/* If this is a network cpu, create an iqueue (consuming a ring). */
	if (cpu_isset(cpu, network_cpus_map)) {
		order = get_order(NOTIF_RING_SIZE);
		page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
		if (page == NULL) {
			netdev_err(dev,
				   "Failed to alloc %zd bytes iqueue memory\n",
				   NOTIF_RING_SIZE);
			return -ENOMEM;
		}
		addr = pfn_to_kaddr(page_to_pfn(page));
		rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
					    addr, NOTIF_RING_SIZE, 0);
		if (rc < 0) {
			netdev_err(dev,
				   "gxio_mpipe_iqueue_init failed: %d\n", rc);
			return rc;
		}
		info->has_iqueue = true;
	}

	return ring;
}
852
853/* Initialize NotifGroup and buckets.
854 * This routine supports tile_net_init_mpipe(), below.
855 */
856static int init_notif_group_and_buckets(struct net_device *dev,
857 int ring, int network_cpus_count)
858{
859 int group, rc;
860
861 /* Allocate one NotifGroup. */
862 rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
863 if (rc < 0) {
864 netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
865 rc);
866 return rc;
867 }
868 group = rc;
869
870 /* Initialize global num_buckets value. */
871 if (network_cpus_count > 4)
872 num_buckets = 256;
873 else if (network_cpus_count > 1)
874 num_buckets = 16;
875
876 /* Allocate some buckets, and set global first_bucket value. */
877 rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
878 if (rc < 0) {
879 netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
880 return rc;
881 }
882 first_bucket = rc;
883
884 /* Init group and buckets. */
885 rc = gxio_mpipe_init_notif_group_and_buckets(
886 &context, group, ring, network_cpus_count,
887 first_bucket, num_buckets,
888 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
889 if (rc != 0) {
890 netdev_err(
891 dev,
892 "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
893 rc);
894 return rc;
895 }
896
897 return 0;
898}
899
900/* Create an irq and register it, then activate the irq and request
901 * interrupts on all cores. Note that "ingress_irq" being initialized
902 * is how we know not to call tile_net_init_mpipe() again.
903 * This routine supports tile_net_init_mpipe(), below.
904 */
905static int tile_net_setup_interrupts(struct net_device *dev)
906{
907 int cpu, rc;
908
909 rc = create_irq();
910 if (rc < 0) {
911 netdev_err(dev, "create_irq failed: %d\n", rc);
912 return rc;
913 }
914 ingress_irq = rc;
915 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
916 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
917 0, NULL, NULL);
918 if (rc != 0) {
919 netdev_err(dev, "request_irq failed: %d\n", rc);
920 destroy_irq(ingress_irq);
921 ingress_irq = -1;
922 return rc;
923 }
924
925 for_each_online_cpu(cpu) {
926 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
927 if (info->has_iqueue) {
928 gxio_mpipe_request_notif_ring_interrupt(
929 &context, cpu_x(cpu), cpu_y(cpu),
930 1, ingress_irq, info->iqueue.ring);
931 }
932 }
933
934 return 0;
935}
936
/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
static void tile_net_init_mpipe_fail(void)
{
	int cpu;

	/* Do cleanups that require the mpipe context first. */
	if (small_buffer_stack >= 0)
		tile_net_pop_all_buffers(small_buffer_stack);
	if (large_buffer_stack >= 0)
		tile_net_pop_all_buffers(large_buffer_stack);

	/* Destroy mpipe context so the hardware no longer owns any memory. */
	gxio_mpipe_destroy(&context);

	/* Only now is it safe to return the per-cpu pages to the kernel.
	 * free_pages() on NULL/unset pointers is harmless (address 0).
	 */
	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		free_pages((unsigned long)(info->comps_for_echannel[0]),
			   get_order(COMPS_SIZE));
		info->comps_for_echannel[0] = NULL;
		free_pages((unsigned long)(info->iqueue.idescs),
			   get_order(NOTIF_RING_SIZE));
		info->iqueue.idescs = NULL;
	}

	if (small_buffer_stack_va)
		free_pages_exact(small_buffer_stack_va, buffer_stack_size);
	if (large_buffer_stack_va)
		free_pages_exact(large_buffer_stack_va, buffer_stack_size);

	/* Reset globals so a later init attempt starts from scratch. */
	small_buffer_stack_va = NULL;
	large_buffer_stack_va = NULL;
	large_buffer_stack = -1;
	small_buffer_stack = -1;
	first_bucket = -1;
}
972
/* The first time any tilegx network device is opened, we initialize
 * the global mpipe state.  If this step fails, we fail to open the
 * device, but if it succeeds, we never need to do it again, and since
 * tile_net can't be unloaded, we never undo it.
 *
 * Note that some resources in this path (buffer stack indices,
 * bindings from init_buffer_stack, etc.) are hypervisor resources
 * that are freed implicitly by gxio_mpipe_destroy().
 *
 * Returns 0 on success, negative errno on failure; all partial state
 * is torn down via tile_net_init_mpipe_fail() on any failure.
 */
static int tile_net_init_mpipe(struct net_device *dev)
{
	int i, num_buffers, rc;
	int cpu;
	int first_ring, ring;
	int network_cpus_count = cpus_weight(network_cpus_map);

	/* Hash-for-home caching is required for the shared mpipe memory. */
	if (!hash_default) {
		netdev_err(dev, "Networking requires hash_default!\n");
		return -EIO;
	}

	rc = gxio_mpipe_init(&context, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
		return -EIO;
	}

	/* Set up the buffer stacks. */
	num_buffers =
		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
	rc = init_buffer_stacks(dev, num_buffers);
	if (rc != 0)
		goto fail;

	/* Provide initial buffers: first all small, then all large. */
	rc = -ENOMEM;
	for (i = 0; i < num_buffers; i++) {
		if (!tile_net_provide_buffer(true)) {
			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
			goto fail;
		}
	}
	for (i = 0; i < num_buffers; i++) {
		if (!tile_net_provide_buffer(false)) {
			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
			goto fail;
		}
	}

	/* Allocate one NotifRing for each network cpu. */
	rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
			   rc);
		goto fail;
	}

	/* Init NotifRings per-cpu; each network cpu consumes one ring. */
	first_ring = rc;
	ring = first_ring;
	for_each_online_cpu(cpu) {
		rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
		if (rc < 0)
			goto fail;
		ring = rc;
	}

	/* Initialize NotifGroup and buckets. */
	rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
	if (rc != 0)
		goto fail;

	/* Create and enable interrupts (sets "ingress_irq", which marks
	 * global init as done for future tile_net_open() calls).
	 */
	rc = tile_net_setup_interrupts(dev);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	tile_net_init_mpipe_fail();
	return rc;
}
1056
/* Create persistent egress info for a given egress channel.
 * Note that this may be shared between, say, "gbe0" and "xgbe0".
 * ISSUE: Defer header allocation until TSO is actually needed?
 *
 * Allocates the TSO header area, the eDMA descriptor ring, and the
 * equeue object, then publishes them in "egress_for_echannel".
 * Idempotent per echannel; the state is never freed on success.
 * Returns 0 on success, negative errno on failure (all partial
 * allocations released via the goto chain below).
 */
static int tile_net_init_egress(struct net_device *dev, int echannel)
{
	struct page *headers_page, *edescs_page, *equeue_page;
	gxio_mpipe_edesc_t *edescs;
	gxio_mpipe_equeue_t *equeue;
	unsigned char *headers;
	int headers_order, edescs_order, equeue_order;
	size_t edescs_size;
	int edma;
	int rc = -ENOMEM;

	/* Only initialize once. */
	if (egress_for_echannel[echannel].equeue != NULL)
		return 0;

	/* Allocate memory for the "headers": one slot per equeue entry. */
	headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
	headers_page = alloc_pages(GFP_KERNEL, headers_order);
	if (headers_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for TSO headers.\n",
			    PAGE_SIZE << headers_order);
		goto fail;
	}
	headers = pfn_to_kaddr(page_to_pfn(headers_page));

	/* Allocate memory for the "edescs". */
	edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
	edescs_order = get_order(edescs_size);
	edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
	if (edescs_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for eDMA ring.\n",
			    edescs_size);
		goto fail_headers;
	}
	edescs = pfn_to_kaddr(page_to_pfn(edescs_page));

	/* Allocate memory for the "equeue". */
	equeue_order = get_order(sizeof(*equeue));
	equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
	if (equeue_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for equeue info.\n",
			    PAGE_SIZE << equeue_order);
		goto fail_edescs;
	}
	equeue = pfn_to_kaddr(page_to_pfn(equeue_page));

	/* Allocate an edma ring.  Note that in practice this can't
	 * fail, which is good, because we will leak an edma ring if so.
	 */
	rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
	if (rc < 0) {
		netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
			    rc);
		goto fail_equeue;
	}
	edma = rc;

	/* Initialize the equeue. */
	rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
				    edescs, edescs_size, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
		goto fail_equeue;
	}

	/* Done.  Publish the equeue last: its non-NULL-ness is the
	 * "already initialized" marker checked above.
	 */
	egress_for_echannel[echannel].equeue = equeue;
	egress_for_echannel[echannel].headers = headers;
	return 0;

fail_equeue:
	__free_pages(equeue_page, equeue_order);

fail_edescs:
	__free_pages(edescs_page, edescs_order);

fail_headers:
	__free_pages(headers_page, headers_order);

fail:
	return rc;
}
1146
1147/* Return channel number for a newly-opened link. */
1148static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
1149 const char *link_name)
1150{
1151 int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
1152 if (rc < 0) {
1153 netdev_err(dev, "Failed to open '%s'\n", link_name);
1154 return rc;
1155 }
1156 rc = gxio_mpipe_link_channel(link);
1157 if (rc < 0 || rc >= TILE_NET_CHANNELS) {
1158 netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
1159 gxio_mpipe_link_close(link);
1160 return -EINVAL;
1161 }
1162 return rc;
1163}
1164
1165/* Help the kernel activate the given network interface. */
1166static int tile_net_open(struct net_device *dev)
1167{
1168 struct tile_net_priv *priv = netdev_priv(dev);
1169 int cpu, rc;
1170
1171 mutex_lock(&tile_net_devs_for_channel_mutex);
1172
1173 /* Do one-time initialization the first time any device is opened. */
1174 if (ingress_irq < 0) {
1175 rc = tile_net_init_mpipe(dev);
1176 if (rc != 0)
1177 goto fail;
1178 }
1179
1180 /* Determine if this is the "loopify" device. */
1181 if (unlikely((loopify_link_name != NULL) &&
1182 !strcmp(dev->name, loopify_link_name))) {
1183 rc = tile_net_link_open(dev, &priv->link, "loop0");
1184 if (rc < 0)
1185 goto fail;
1186 priv->channel = rc;
1187 rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
1188 if (rc < 0)
1189 goto fail;
1190 priv->loopify_channel = rc;
1191 priv->echannel = rc;
1192 } else {
1193 rc = tile_net_link_open(dev, &priv->link, dev->name);
1194 if (rc < 0)
1195 goto fail;
1196 priv->channel = rc;
1197 priv->echannel = rc;
1198 }
1199
1200 /* Initialize egress info (if needed). Once ever, per echannel. */
1201 rc = tile_net_init_egress(dev, priv->echannel);
1202 if (rc != 0)
1203 goto fail;
1204
1205 tile_net_devs_for_channel[priv->channel] = dev;
1206
1207 rc = tile_net_update(dev);
1208 if (rc != 0)
1209 goto fail;
1210
1211 mutex_unlock(&tile_net_devs_for_channel_mutex);
1212
1213 /* Initialize the transmit wake timer for this device for each cpu. */
1214 for_each_online_cpu(cpu) {
1215 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1216 struct tile_net_tx_wake *tx_wake =
1217 &info->tx_wake[priv->echannel];
1218
1219 hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
1220 HRTIMER_MODE_REL);
1221 tx_wake->timer.function = tile_net_handle_tx_wake_timer;
1222 tx_wake->dev = dev;
1223 }
1224
1225 for_each_online_cpu(cpu)
1226 netif_start_subqueue(dev, cpu);
1227 netif_carrier_on(dev);
1228 return 0;
1229
1230fail:
1231 if (priv->loopify_channel >= 0) {
1232 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1233 netdev_warn(dev, "Failed to close loopify link!\n");
1234 priv->loopify_channel = -1;
1235 }
1236 if (priv->channel >= 0) {
1237 if (gxio_mpipe_link_close(&priv->link) != 0)
1238 netdev_warn(dev, "Failed to close link!\n");
1239 priv->channel = -1;
1240 }
1241 priv->echannel = -1;
1242 tile_net_devs_for_channel[priv->channel] = NULL;
1243 mutex_unlock(&tile_net_devs_for_channel_mutex);
1244
1245 /* Don't return raw gxio error codes to generic Linux. */
1246 return (rc > -512) ? rc : -EIO;
1247}
1248
1249/* Help the kernel deactivate the given network interface. */
1250static int tile_net_stop(struct net_device *dev)
1251{
1252 struct tile_net_priv *priv = netdev_priv(dev);
1253 int cpu;
1254
1255 for_each_online_cpu(cpu) {
1256 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1257 struct tile_net_tx_wake *tx_wake =
1258 &info->tx_wake[priv->echannel];
1259
1260 hrtimer_cancel(&tx_wake->timer);
1261 netif_stop_subqueue(dev, cpu);
1262 }
1263
1264 mutex_lock(&tile_net_devs_for_channel_mutex);
1265 tile_net_devs_for_channel[priv->channel] = NULL;
1266 (void)tile_net_update(dev);
1267 if (priv->loopify_channel >= 0) {
1268 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1269 netdev_warn(dev, "Failed to close loopify link!\n");
1270 priv->loopify_channel = -1;
1271 }
1272 if (priv->channel >= 0) {
1273 if (gxio_mpipe_link_close(&priv->link) != 0)
1274 netdev_warn(dev, "Failed to close link!\n");
1275 priv->channel = -1;
1276 }
1277 priv->echannel = -1;
1278 mutex_unlock(&tile_net_devs_for_channel_mutex);
1279
1280 return 0;
1281}
1282
1283/* Determine the VA for a fragment. */
1284static inline void *tile_net_frag_buf(skb_frag_t *f)
1285{
1286 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1287 return pfn_to_kaddr(pfn) + f->page_offset;
1288}
1289
/* Acquire a completion entry and an egress slot, or if we can't,
 * stop the queue and schedule the tx_wake timer.
 *
 * Returns the reserved equeue slot (>= 0), or -1 if neither a
 * completion entry nor an egress slot could be obtained; in the -1
 * case this cpu's subqueue has been stopped and the wake timer armed.
 * Caller must hold local irqs disabled.
 */
static s64 tile_net_equeue_try_reserve(struct net_device *dev,
				       struct tile_net_comps *comps,
				       gxio_mpipe_equeue_t *equeue,
				       int num_edescs)
{
	/* Try to acquire a completion entry (either there is room in the
	 * comps array, or freeing some old entries makes room).
	 */
	if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
	    tile_net_free_comps(equeue, comps, 32, false) != 0) {

		/* Try to acquire an egress slot. */
		s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
		if (slot >= 0)
			return slot;

		/* Freeing some completions gives the equeue time to drain. */
		tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);

		/* Second (and last) attempt to reserve a slot. */
		slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
		if (slot >= 0)
			return slot;
	}

	/* Still nothing; give up and stop the queue for a short while. */
	netif_stop_subqueue(dev, smp_processor_id());
	tile_net_schedule_tx_wake_timer(dev);
	return -1;
}
1320
/* Determine how many edesc's are needed for TSO.
 *
 * Sometimes, if "sendfile()" requires copying, we will be called with
 * "data" containing the header and payload, with "frags" being empty.
 * Sometimes, for example when using NFS over TCP, a single segment can
 * span 3 fragments.  This requires special care.
 *
 * NOTE: the fragment walk below (f_id/f_size/f_used starting at -1)
 * must stay in lockstep with the identical walks in
 * tso_headers_prepare() and tso_egress().
 */
static int tso_count_edescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int data_len = skb->data_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;    /* id of the current fragment */
	long f_size = -1;  /* size of the current fragment */
	long f_used = -1;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {

		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {

			/* Advance as needed.  The -1 initial values force
			 * this to trigger on the first iteration.
			 */
			while (f_used >= f_size) {
				f_id++;
				f_size = sh->frags[f_id].size;
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	return num_edescs;
}
1370
/* Prepare modified copies of the skbuff headers.
 * FIXME: add support for IPv6.
 *
 * Writes one adjusted TCP/IP header per GSO segment into the
 * preallocated "headers" area, starting at equeue slot "slot"
 * (one HEADER_BYTES-sized cell per equeue slot, modulo the ring).
 * The fragment walk must mirror tso_count_edescs()/tso_egress().
 */
static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
				s64 slot)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct iphdr *ih;
	struct tcphdr *th;
	unsigned int data_len = skb->data_len;
	unsigned char *data = skb->data;
	unsigned int ih_off, th_off, sh_len, p_len;
	unsigned int isum_seed, tsum_seed, id, seq;
	long f_id = -1;    /* id of the current fragment */
	long f_size = -1;  /* size of the current fragment */
	long f_used = -1;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int segment;

	/* Locate original headers and compute various lengths. */
	ih = ip_hdr(skb);
	th = tcp_hdr(skb);
	ih_off = skb_network_offset(skb);
	th_off = skb_transport_offset(skb);
	sh_len = th_off + tcp_hdrlen(skb);
	p_len = sh->gso_size;

	/* Set up seed values for IP and TCP csum and initialize id and seq.
	 * The seeds back out the original header fields so each segment's
	 * checksum can be recomputed incrementally below.
	 */
	isum_seed = ((0xFFFF - ih->check) +
		     (0xFFFF - ih->tot_len) +
		     (0xFFFF - ih->id));
	tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
	id = ntohs(ih->id);
	seq = ntohl(th->seq);

	/* Prepare all the headers. */
	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned char *buf;
		unsigned int p_used = 0;

		/* Copy to the header memory for this segment. */
		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
			NET_IP_ALIGN;
		memcpy(buf, data, sh_len);

		/* Update copied ip header: per-segment length, id, csum. */
		ih = (struct iphdr *)(buf + ih_off);
		ih->tot_len = htons(sh_len + p_len - ih_off);
		ih->id = htons(id);
		ih->check = csum_long(isum_seed + ih->tot_len +
				      ih->id) ^ 0xffff;

		/* Update copied tcp header. */
		th = (struct tcphdr *)(buf + th_off);
		th->seq = htonl(seq);
		th->check = csum_long(tsum_seed + htons(sh_len + p_len));
		/* Only the final segment keeps FIN/PSH. */
		if (segment != sh->gso_segs - 1) {
			th->fin = 0;
			th->psh = 0;
		}

		/* Skip past the header. */
		slot++;

		/* Skip past the payload, counting one slot per edesc
		 * exactly as tso_egress() will emit them.
		 */
		while (p_used < p_len) {

			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = sh->frags[f_id].size;
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;

			slot++;
		}

		id++;
		seq += p_len;

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* Flush the headers so they are ready for hardware DMA. */
	wmb();
}
1467
/* Pass all the data to mpipe for egress.
 *
 * Emits, per segment, one header edesc (pointing into the preprepared
 * "headers" area) followed by one edesc per payload piece.  The slot
 * and fragment walk must match tso_headers_prepare() exactly, since
 * both index the same reserved equeue range starting at "slot".
 */
static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
		       struct sk_buff *skb, unsigned char *headers, s64 slot)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int data_len = skb->data_len;
	unsigned int p_len = sh->gso_size;
	gxio_mpipe_edesc_t edesc_head = { { 0 } };
	gxio_mpipe_edesc_t edesc_body = { { 0 } };
	long f_id = -1;    /* id of the current fragment */
	long f_size = -1;  /* size of the current fragment */
	long f_used = -1;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	unsigned long tx_packets = 0, tx_bytes = 0;
	unsigned int csum_start, sh_len;
	int segment;

	/* Prepare to egress the headers: set up header edesc, asking the
	 * hardware to finalize the TCP checksum for each segment.
	 */
	csum_start = skb_checksum_start_offset(skb);
	sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	edesc_head.csum = 1;
	edesc_head.csum_start = csum_start;
	edesc_head.csum_dest = csum_start + skb->csum_offset;
	edesc_head.xfer_size = sh_len;

	/* This is only used to specify the TLB. */
	edesc_head.stack_idx = large_buffer_stack;
	edesc_body.stack_idx = large_buffer_stack;

	/* Egress all the edescs. */
	for (segment = 0; segment < sh->gso_segs; segment++) {
		void *va;
		unsigned char *buf;
		unsigned int p_used = 0;

		/* Egress the header. */
		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
			NET_IP_ALIGN;
		edesc_head.va = va_to_tile_io_addr(buf);
		gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
		slot++;

		/* Egress the payload. */
		while (p_used < p_len) {

			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = sh->frags[f_id].size;
				f_used = 0;
			}

			va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;

			/* Egress a piece of the payload; "bound" marks the
			 * final edesc of this segment.
			 */
			edesc_body.va = va_to_tile_io_addr(va);
			edesc_body.xfer_size = n;
			edesc_body.bound = !(p_used < p_len);
			gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
			slot++;
		}

		tx_packets++;
		tx_bytes += sh_len + p_len;

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* Update stats. */
	tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
	tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
}
1551
1552/* Do "TSO" handling for egress.
1553 *
1554 * Normally drivers set NETIF_F_TSO only to support hardware TSO;
1555 * otherwise the stack uses scatter-gather to implement GSO in software.
1556 * On our testing, enabling GSO support (via NETIF_F_SG) drops network
1557 * performance down to around 7.5 Gbps on the 10G interfaces, although
1558 * also dropping cpu utilization way down, to under 8%. But
1559 * implementing "TSO" in the driver brings performance back up to line
1560 * rate, while dropping cpu usage even further, to less than 4%. In
1561 * practice, profiling of GSO shows that skb_segment() is what causes
1562 * the performance overheads; we benefit in the driver from using
1563 * preallocated memory to duplicate the TCP/IP headers.
1564 */
1565static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1566{
1567 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1568 struct tile_net_priv *priv = netdev_priv(dev);
1569 int channel = priv->echannel;
1570 struct tile_net_egress *egress = &egress_for_echannel[channel];
1571 struct tile_net_comps *comps = info->comps_for_echannel[channel];
1572 gxio_mpipe_equeue_t *equeue = egress->equeue;
1573 unsigned long irqflags;
1574 int num_edescs;
1575 s64 slot;
1576
1577 /* Determine how many mpipe edesc's are needed. */
1578 num_edescs = tso_count_edescs(skb);
1579
1580 local_irq_save(irqflags);
1581
1582 /* Try to acquire a completion entry and an egress slot. */
1583 slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
1584 if (slot < 0) {
1585 local_irq_restore(irqflags);
1586 return NETDEV_TX_BUSY;
1587 }
1588
1589 /* Set up copies of header data properly. */
1590 tso_headers_prepare(skb, egress->headers, slot);
1591
1592 /* Actually pass the data to the network hardware. */
1593 tso_egress(dev, equeue, skb, egress->headers, slot);
1594
1595 /* Add a completion record. */
1596 add_comp(equeue, comps, slot + num_edescs - 1, skb);
1597
1598 local_irq_restore(irqflags);
1599
1600 /* Make sure the egress timer is scheduled. */
1601 tile_net_schedule_egress_timer();
1602
1603 return NETDEV_TX_OK;
1604}
1605
1606/* Analyze the body and frags for a transmit request. */
1607static unsigned int tile_net_tx_frags(struct frag *frags,
1608 struct sk_buff *skb,
1609 void *b_data, unsigned int b_len)
1610{
1611 unsigned int i, n = 0;
1612
1613 struct skb_shared_info *sh = skb_shinfo(skb);
1614
1615 if (b_len != 0) {
1616 frags[n].buf = b_data;
1617 frags[n++].length = b_len;
1618 }
1619
1620 for (i = 0; i < sh->nr_frags; i++) {
1621 skb_frag_t *f = &sh->frags[i];
1622 frags[n].buf = tile_net_frag_buf(f);
1623 frags[n++].length = skb_frag_size(f);
1624 }
1625
1626 return n;
1627}
1628
/* Help the kernel transmit a packet.
 *
 * Non-GSO path: builds one edesc per fragment, reserves equeue slots,
 * posts the edescs, and records a completion so the skb is freed when
 * the hardware is done.  GSO skbs are diverted to tile_net_tx_tso().
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	struct tile_net_priv *priv = netdev_priv(dev);
	struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
	gxio_mpipe_equeue_t *equeue = egress->equeue;
	struct tile_net_comps *comps =
		info->comps_for_echannel[priv->echannel];
	unsigned int len = skb->len;
	unsigned char *data = skb->data;
	unsigned int num_edescs;
	struct frag frags[MAX_FRAGS];
	gxio_mpipe_edesc_t edescs[MAX_FRAGS];
	unsigned long irqflags;
	gxio_mpipe_edesc_t edesc = { { 0 } };
	unsigned int i;
	s64 slot;

	if (skb_is_gso(skb))
		return tile_net_tx_tso(skb, dev);

	num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));

	/* This is only used to specify the TLB. */
	edesc.stack_idx = large_buffer_stack;

	/* Prepare the edescs: one per fragment, sharing the template. */
	for (i = 0; i < num_edescs; i++) {
		edesc.xfer_size = frags[i].length;
		edesc.va = va_to_tile_io_addr(frags[i].buf);
		edescs[i] = edesc;
	}

	/* Mark the final edesc. */
	edescs[num_edescs - 1].bound = 1;

	/* Add checksum info to the initial edesc, if needed. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start = skb_checksum_start_offset(skb);
		edescs[0].csum = 1;
		edescs[0].csum_start = csum_start;
		edescs[0].csum_dest = csum_start + skb->csum_offset;
	}

	local_irq_save(irqflags);

	/* Try to acquire a completion entry and an egress slot. */
	slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
	if (slot < 0) {
		local_irq_restore(irqflags);
		return NETDEV_TX_BUSY;
	}

	/* Post every edesc into its reserved slot. */
	for (i = 0; i < num_edescs; i++)
		gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);

	/* Add a completion record (frees the skb after egress). */
	add_comp(equeue, comps, slot - 1, skb);

	/* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
	tile_net_stats_add(1, &priv->stats.tx_packets);
	tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
			   &priv->stats.tx_bytes);

	local_irq_restore(irqflags);

	/* Make sure the egress timer is scheduled. */
	tile_net_schedule_egress_timer();

	return NETDEV_TX_OK;
}
1701
1702/* Return subqueue id on this core (one per core). */
1703static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
1704{
1705 return smp_processor_id();
1706}
1707
1708/* Deal with a transmit timeout. */
1709static void tile_net_tx_timeout(struct net_device *dev)
1710{
1711 int cpu;
1712
1713 for_each_online_cpu(cpu)
1714 netif_wake_subqueue(dev, cpu);
1715}
1716
1717/* Ioctl commands. */
1718static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1719{
1720 return -EOPNOTSUPP;
1721}
1722
1723/* Get system network statistics for device. */
1724static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
1725{
1726 struct tile_net_priv *priv = netdev_priv(dev);
1727 return &priv->stats;
1728}
1729
1730/* Change the MTU. */
1731static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
1732{
1733 if ((new_mtu < 68) || (new_mtu > 1500))
1734 return -EINVAL;
1735 dev->mtu = new_mtu;
1736 return 0;
1737}
1738
1739/* Change the Ethernet address of the NIC.
1740 *
1741 * The hypervisor driver does not support changing MAC address. However,
1742 * the hardware does not do anything with the MAC address, so the address
1743 * which gets used on outgoing packets, and which is accepted on incoming
1744 * packets, is completely up to us.
1745 *
1746 * Returns 0 on success, negative on failure.
1747 */
1748static int tile_net_set_mac_address(struct net_device *dev, void *p)
1749{
1750 struct sockaddr *addr = p;
1751
1752 if (!is_valid_ether_addr(addr->sa_data))
1753 return -EINVAL;
1754 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1755 return 0;
1756}
1757
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook (used by e.g. netconsole to push skbs without re-enabling
 * interrupts): run one pass of the ingress handler with the percpu
 * ingress IRQ masked, then unmask it.  Never called while the interrupt
 * routine itself is executing.
 */
static void tile_net_netpoll(struct net_device *dev)
{
	disable_percpu_irq(ingress_irq);
	tile_net_handle_ingress_irq(ingress_irq, NULL);
	enable_percpu_irq(ingress_irq, 0);
}
#endif
1770
1771static const struct net_device_ops tile_net_ops = {
1772 .ndo_open = tile_net_open,
1773 .ndo_stop = tile_net_stop,
1774 .ndo_start_xmit = tile_net_tx,
1775 .ndo_select_queue = tile_net_select_queue,
1776 .ndo_do_ioctl = tile_net_ioctl,
1777 .ndo_get_stats = tile_net_get_stats,
1778 .ndo_change_mtu = tile_net_change_mtu,
1779 .ndo_tx_timeout = tile_net_tx_timeout,
1780 .ndo_set_mac_address = tile_net_set_mac_address,
1781#ifdef CONFIG_NET_POLL_CONTROLLER
1782 .ndo_poll_controller = tile_net_netpoll,
1783#endif
1784};
1785
1786/* The setup function.
1787 *
1788 * This uses ether_setup() to assign various fields in dev, including
1789 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
1790 */
1791static void tile_net_setup(struct net_device *dev)
1792{
1793 ether_setup(dev);
1794 dev->netdev_ops = &tile_net_ops;
1795 dev->watchdog_timeo = TILE_NET_TIMEOUT;
1796 dev->features |= NETIF_F_LLTX;
1797 dev->features |= NETIF_F_HW_CSUM;
1798 dev->features |= NETIF_F_SG;
1799 dev->features |= NETIF_F_TSO;
1800 dev->mtu = 1500;
1801}
1802
1803/* Allocate the device structure, register the device, and obtain the
1804 * MAC address from the hypervisor.
1805 */
1806static void tile_net_dev_init(const char *name, const uint8_t *mac)
1807{
1808 int ret;
1809 int i;
1810 int nz_addr = 0;
1811 struct net_device *dev;
1812 struct tile_net_priv *priv;
1813
1814 /* HACK: Ignore "loop" links. */
1815 if (strncmp(name, "loop", 4) == 0)
1816 return;
1817
1818 /* Allocate the device structure. Normally, "name" is a
1819 * template, instantiated by register_netdev(), but not for us.
1820 */
1821 dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
1822 NR_CPUS, 1);
1823 if (!dev) {
1824 pr_err("alloc_netdev_mqs(%s) failed\n", name);
1825 return;
1826 }
1827
1828 /* Initialize "priv". */
1829 priv = netdev_priv(dev);
1830 memset(priv, 0, sizeof(*priv));
1831 priv->dev = dev;
1832 priv->channel = -1;
1833 priv->loopify_channel = -1;
1834 priv->echannel = -1;
1835
1836 /* Get the MAC address and set it in the device struct; this must
1837 * be done before the device is opened. If the MAC is all zeroes,
1838 * we use a random address, since we're probably on the simulator.
1839 */
1840 for (i = 0; i < 6; i++)
1841 nz_addr |= mac[i];
1842
1843 if (nz_addr) {
1844 memcpy(dev->dev_addr, mac, 6);
1845 dev->addr_len = 6;
1846 } else {
1847 random_ether_addr(dev->dev_addr);
1848 }
1849
1850 /* Register the network device. */
1851 ret = register_netdev(dev);
1852 if (ret) {
1853 netdev_err(dev, "register_netdev failed %d\n", ret);
1854 free_netdev(dev);
1855 return;
1856 }
1857}
1858
1859/* Per-cpu module initialization. */
1860static void tile_net_init_module_percpu(void *unused)
1861{
1862 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1863 int my_cpu = smp_processor_id();
1864
1865 info->has_iqueue = false;
1866
1867 info->my_cpu = my_cpu;
1868
1869 /* Initialize the egress timer. */
1870 hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1871 info->egress_timer.function = tile_net_handle_egress_timer;
1872}
1873
1874/* Module initialization. */
1875static int __init tile_net_init_module(void)
1876{
1877 int i;
1878 char name[GXIO_MPIPE_LINK_NAME_LEN];
1879 uint8_t mac[6];
1880
1881 pr_info("Tilera Network Driver\n");
1882
1883 mutex_init(&tile_net_devs_for_channel_mutex);
1884
1885 /* Initialize each CPU. */
1886 on_each_cpu(tile_net_init_module_percpu, NULL, 1);
1887
1888 /* Find out what devices we have, and initialize them. */
1889 for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
1890 tile_net_dev_init(name, mac);
1891
1892 if (!network_cpus_init())
1893 network_cpus_map = *cpu_online_mask;
1894
1895 return 0;
1896}
1897
1898module_init(tile_net_init_module);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4ffcd57b011b..2857ab078aac 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -478,6 +478,7 @@ struct netvsc_device {
478 u32 nvsp_version; 478 u32 nvsp_version;
479 479
480 atomic_t num_outstanding_sends; 480 atomic_t num_outstanding_sends;
481 wait_queue_head_t wait_drain;
481 bool start_remove; 482 bool start_remove;
482 bool destroy; 483 bool destroy;
483 /* 484 /*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8b919471472f..0c569831db5a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
42 if (!net_device) 42 if (!net_device)
43 return NULL; 43 return NULL;
44 44
45 init_waitqueue_head(&net_device->wait_drain);
45 net_device->start_remove = false; 46 net_device->start_remove = false;
46 net_device->destroy = false; 47 net_device->destroy = false;
47 net_device->dev = device; 48 net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
387 spin_unlock_irqrestore(&device->channel->inbound_lock, flags); 388 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
388 389
389 /* Wait for all send completions */ 390 /* Wait for all send completions */
390 while (atomic_read(&net_device->num_outstanding_sends)) { 391 wait_event(net_device->wait_drain,
391 dev_info(&device->device, 392 atomic_read(&net_device->num_outstanding_sends) == 0);
392 "waiting for %d requests to complete...\n",
393 atomic_read(&net_device->num_outstanding_sends));
394 udelay(100);
395 }
396 393
397 netvsc_disconnect_vsp(net_device); 394 netvsc_disconnect_vsp(net_device);
398 395
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
486 num_outstanding_sends = 483 num_outstanding_sends =
487 atomic_dec_return(&net_device->num_outstanding_sends); 484 atomic_dec_return(&net_device->num_outstanding_sends);
488 485
486 if (net_device->destroy && num_outstanding_sends == 0)
487 wake_up(&net_device->wait_drain);
488
489 if (netif_queue_stopped(ndev) && !net_device->start_remove && 489 if (netif_queue_stopped(ndev) && !net_device->start_remove &&
490 (hv_ringbuf_avail_percent(&device->channel->outbound) 490 (hv_ringbuf_avail_percent(&device->channel->outbound)
491 > RING_AVAIL_PERCENT_HIWATER || 491 > RING_AVAIL_PERCENT_HIWATER ||
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 5ac46f5226f3..47f8e8939266 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ 41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ 42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
43#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ 43#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
44#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
45#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
44 46
45static int ip175c_config_init(struct phy_device *phydev) 47static int ip175c_config_init(struct phy_device *phydev)
46{ 48{
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
136 if (c < 0) 138 if (c < 0)
137 return c; 139 return c;
138 140
141 /* INTR pin used: speed/link/duplex will cause an interrupt */
142 c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
143 if (c < 0)
144 return c;
145
139 if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { 146 if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
140 /* Additional delay (2ns) used to adjust RX clock phase 147 /* Additional delay (2ns) used to adjust RX clock phase
141 * at RGMII interface */ 148 * at RGMII interface */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 683ef1ce5519..5061608f408c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
96} 96}
97/** 97/**
98 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. 98 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
99 * @mdio_np: Pointer to the mii_bus. 99 * @mdio_bus_np: Pointer to the mii_bus.
100 * 100 *
101 * Returns a pointer to the mii_bus, or NULL if none found. 101 * Returns a pointer to the mii_bus, or NULL if none found.
102 * 102 *
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 71e2b0523bc2..3ae80eccd0ef 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -35,6 +35,7 @@
35#include <linux/crc32.h> 35#include <linux/crc32.h>
36#include <linux/usb/usbnet.h> 36#include <linux/usb/usbnet.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/if_vlan.h>
38 39
39#define DRIVER_VERSION "22-Dec-2011" 40#define DRIVER_VERSION "22-Dec-2011"
40#define DRIVER_NAME "asix" 41#define DRIVER_NAME "asix"
@@ -321,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
321 return 0; 322 return 0;
322 } 323 }
323 324
324 if ((size > dev->net->mtu + ETH_HLEN) || 325 if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
325 (size + offset > skb->len)) { 326 (size + offset > skb->len)) {
326 netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n", 327 netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
327 size); 328 size);
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index add1064f755d..03c2d8d653df 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
629 return skb->len > 0; 629 return skb->len > 0;
630} 630}
631 631
632static void mcs7830_status(struct usbnet *dev, struct urb *urb)
633{
634 u8 *buf = urb->transfer_buffer;
635 bool link;
636
637 if (urb->actual_length < 16)
638 return;
639
640 link = !(buf[1] & 0x20);
641 if (netif_carrier_ok(dev->net) != link) {
642 if (link) {
643 netif_carrier_on(dev->net);
644 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
645 } else
646 netif_carrier_off(dev->net);
647 netdev_dbg(dev->net, "Link Status is: %d\n", link);
648 }
649}
650
632static const struct driver_info moschip_info = { 651static const struct driver_info moschip_info = {
633 .description = "MOSCHIP 7830/7832/7730 usb-NET adapter", 652 .description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
634 .bind = mcs7830_bind, 653 .bind = mcs7830_bind,
635 .rx_fixup = mcs7830_rx_fixup, 654 .rx_fixup = mcs7830_rx_fixup,
636 .flags = FLAG_ETHER, 655 .flags = FLAG_ETHER | FLAG_LINK_INTR,
656 .status = mcs7830_status,
637 .in = 1, 657 .in = 1,
638 .out = 2, 658 .out = 2,
639}; 659};
@@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = {
642 .description = "Sitecom LN-30 usb-NET adapter", 662 .description = "Sitecom LN-30 usb-NET adapter",
643 .bind = mcs7830_bind, 663 .bind = mcs7830_bind,
644 .rx_fixup = mcs7830_rx_fixup, 664 .rx_fixup = mcs7830_rx_fixup,
645 .flags = FLAG_ETHER, 665 .flags = FLAG_ETHER | FLAG_LINK_INTR,
666 .status = mcs7830_status,
646 .in = 1, 667 .in = 1,
647 .out = 2, 668 .out = 2,
648}; 669};
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 380dbea6109d..3b206786b5e7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -547,6 +547,8 @@ static const struct usb_device_id products[] = {
547 {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ 547 {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
548 {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ 548 {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
549 {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ 549 {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
550 {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
551 {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
550 { } /* END */ 552 { } /* END */
551}; 553};
552MODULE_DEVICE_TABLE(usb, products); 554MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 3faef5670d1f..d75d1f56becf 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
946} 946}
947 947
948static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; 948static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
949static const struct sierra_net_info_data sierra_net_info_data_68A3 = { 949static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
950 .rx_urb_size = 8 * 1024, 950 .rx_urb_size = 8 * 1024,
951 .whitelist = { 951 .whitelist = {
952 .infolen = ARRAY_SIZE(sierra_net_ifnum_list), 952 .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
954 } 954 }
955}; 955};
956 956
957static const struct driver_info sierra_net_info_68A3 = { 957static const struct driver_info sierra_net_info_direct_ip = {
958 .description = "Sierra Wireless USB-to-WWAN Modem", 958 .description = "Sierra Wireless USB-to-WWAN Modem",
959 .flags = FLAG_WWAN | FLAG_SEND_ZLP, 959 .flags = FLAG_WWAN | FLAG_SEND_ZLP,
960 .bind = sierra_net_bind, 960 .bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
962 .status = sierra_net_status, 962 .status = sierra_net_status,
963 .rx_fixup = sierra_net_rx_fixup, 963 .rx_fixup = sierra_net_rx_fixup,
964 .tx_fixup = sierra_net_tx_fixup, 964 .tx_fixup = sierra_net_tx_fixup,
965 .data = (unsigned long)&sierra_net_info_data_68A3, 965 .data = (unsigned long)&sierra_net_info_data_direct_ip,
966}; 966};
967 967
968static const struct usb_device_id products[] = { 968static const struct usb_device_id products[] = {
969 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ 969 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
970 .driver_info = (unsigned long) &sierra_net_info_68A3}, 970 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
971 {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
972 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
973 {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
974 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
975 {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
976 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
971 977
972 {}, /* last item */ 978 {}, /* last item */
973}; 979};
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ce6995e8d08..f18149ae2588 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
42#define VIRTNET_DRIVER_VERSION "1.0.0" 42#define VIRTNET_DRIVER_VERSION "1.0.0"
43 43
44struct virtnet_stats { 44struct virtnet_stats {
45 struct u64_stats_sync syncp; 45 struct u64_stats_sync tx_syncp;
46 struct u64_stats_sync rx_syncp;
46 u64 tx_bytes; 47 u64 tx_bytes;
47 u64 tx_packets; 48 u64 tx_packets;
48 49
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
300 301
301 hdr = skb_vnet_hdr(skb); 302 hdr = skb_vnet_hdr(skb);
302 303
303 u64_stats_update_begin(&stats->syncp); 304 u64_stats_update_begin(&stats->rx_syncp);
304 stats->rx_bytes += skb->len; 305 stats->rx_bytes += skb->len;
305 stats->rx_packets++; 306 stats->rx_packets++;
306 u64_stats_update_end(&stats->syncp); 307 u64_stats_update_end(&stats->rx_syncp);
307 308
308 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 309 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
309 pr_debug("Needs csum!\n"); 310 pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
565 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { 566 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
566 pr_debug("Sent skb %p\n", skb); 567 pr_debug("Sent skb %p\n", skb);
567 568
568 u64_stats_update_begin(&stats->syncp); 569 u64_stats_update_begin(&stats->tx_syncp);
569 stats->tx_bytes += skb->len; 570 stats->tx_bytes += skb->len;
570 stats->tx_packets++; 571 stats->tx_packets++;
571 u64_stats_update_end(&stats->syncp); 572 u64_stats_update_end(&stats->tx_syncp);
572 573
573 tot_sgs += skb_vnet_hdr(skb)->num_sg; 574 tot_sgs += skb_vnet_hdr(skb)->num_sg;
574 dev_kfree_skb_any(skb); 575 dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
703 u64 tpackets, tbytes, rpackets, rbytes; 704 u64 tpackets, tbytes, rpackets, rbytes;
704 705
705 do { 706 do {
706 start = u64_stats_fetch_begin(&stats->syncp); 707 start = u64_stats_fetch_begin(&stats->tx_syncp);
707 tpackets = stats->tx_packets; 708 tpackets = stats->tx_packets;
708 tbytes = stats->tx_bytes; 709 tbytes = stats->tx_bytes;
710 } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
711
712 do {
713 start = u64_stats_fetch_begin(&stats->rx_syncp);
709 rpackets = stats->rx_packets; 714 rpackets = stats->rx_packets;
710 rbytes = stats->rx_bytes; 715 rbytes = stats->rx_bytes;
711 } while (u64_stats_fetch_retry(&stats->syncp, start)); 716 } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
712 717
713 tot->rx_packets += rpackets; 718 tot->rx_packets += rpackets;
714 tot->tx_packets += tpackets; 719 tot->tx_packets += tpackets;
@@ -1231,11 +1236,6 @@ static int virtnet_freeze(struct virtio_device *vdev)
1231 vi->config_enable = false; 1236 vi->config_enable = false;
1232 mutex_unlock(&vi->config_lock); 1237 mutex_unlock(&vi->config_lock);
1233 1238
1234 virtqueue_disable_cb(vi->rvq);
1235 virtqueue_disable_cb(vi->svq);
1236 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
1237 virtqueue_disable_cb(vi->cvq);
1238
1239 netif_device_detach(vi->dev); 1239 netif_device_detach(vi->dev);
1240 cancel_delayed_work_sync(&vi->refill); 1240 cancel_delayed_work_sync(&vi->refill);
1241 1241
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0ba81a66061f..fbaa30930076 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2415,6 +2415,22 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2415* Initialization routines * 2415* Initialization routines *
2416\*************************/ 2416\*************************/
2417 2417
2418static const struct ieee80211_iface_limit if_limits[] = {
2419 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
2420 { .max = 4, .types =
2421#ifdef CONFIG_MAC80211_MESH
2422 BIT(NL80211_IFTYPE_MESH_POINT) |
2423#endif
2424 BIT(NL80211_IFTYPE_AP) },
2425};
2426
2427static const struct ieee80211_iface_combination if_comb = {
2428 .limits = if_limits,
2429 .n_limits = ARRAY_SIZE(if_limits),
2430 .max_interfaces = 2048,
2431 .num_different_channels = 1,
2432};
2433
2418int __devinit 2434int __devinit
2419ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) 2435ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2420{ 2436{
@@ -2436,6 +2452,9 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2436 BIT(NL80211_IFTYPE_ADHOC) | 2452 BIT(NL80211_IFTYPE_ADHOC) |
2437 BIT(NL80211_IFTYPE_MESH_POINT); 2453 BIT(NL80211_IFTYPE_MESH_POINT);
2438 2454
2455 hw->wiphy->iface_combinations = &if_comb;
2456 hw->wiphy->n_iface_combinations = 1;
2457
2439 /* SW support for IBSS_RSN is provided by mac80211 */ 2458 /* SW support for IBSS_RSN is provided by mac80211 */
2440 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 2459 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
2441 2460
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ac53d901801d..dfb0441f406c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3809,7 +3809,7 @@ static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
3809 return true; 3809 return true;
3810} 3810}
3811 3811
3812static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) 3812void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3813{ 3813{
3814 int internal_regulator = 3814 int internal_regulator =
3815 ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR); 3815 ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 2505ac44f0c1..8396d150ce01 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,4 +334,7 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
334 334
335unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, 335unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
336 struct ath9k_channel *chan); 336 struct ath9k_channel *chan);
337
338void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
339
337#endif 340#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f11d9b2677fd..1bd3a3d22101 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
18#define INITVALS_9330_1P1_H 19#define INITVALS_9330_1P1_H
19 20
20static const u32 ar9331_1p1_baseband_postamble[][5] = { 21static const u32 ar9331_1p1_baseband_postamble[][5] = {
21 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 22 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
22 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, 23 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
23 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e}, 24 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
24 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, 25 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
27 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, 28 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
28 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044}, 29 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
29 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4}, 30 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
30 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020}, 31 {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
31 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 32 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
32 {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e}, 33 {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
33 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, 34 {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
34 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 35 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
35 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 36 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
36 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 37 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
55 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 56 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
56 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 57 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
57 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 58 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
58 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981}, 59 {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
59 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 60 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
60 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 61 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
61 {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, 62 {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
63}; 64};
64 65
65static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = { 66static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
66 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 67 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
67 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 68 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
68 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52}, 69 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
69 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84}, 70 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
155}; 156};
156 157
157static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = { 158static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
158 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 159 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
159 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 160 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
160 {0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52}, 161 {0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
161 {0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84}, 162 {0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
245}; 246};
246 247
247static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = { 248static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
248 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 249 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
249 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 250 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
250 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52}, 251 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
251 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84}, 252 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -377,14 +378,14 @@ static const u32 ar9331_1p1_radio_core[][2] = {
377 {0x000160b4, 0x92480040}, 378 {0x000160b4, 0x92480040},
378 {0x000160c0, 0x006db6db}, 379 {0x000160c0, 0x006db6db},
379 {0x000160c4, 0x0186db60}, 380 {0x000160c4, 0x0186db60},
380 {0x000160c8, 0x6db6db6c}, 381 {0x000160c8, 0x6db4db6c},
381 {0x000160cc, 0x6de6c300}, 382 {0x000160cc, 0x6de6c300},
382 {0x000160d0, 0x14500820}, 383 {0x000160d0, 0x14500820},
383 {0x00016100, 0x04cb0001}, 384 {0x00016100, 0x04cb0001},
384 {0x00016104, 0xfff80015}, 385 {0x00016104, 0xfff80015},
385 {0x00016108, 0x00080010}, 386 {0x00016108, 0x00080010},
386 {0x0001610c, 0x00170000}, 387 {0x0001610c, 0x00170000},
387 {0x00016140, 0x10804000}, 388 {0x00016140, 0x10800000},
388 {0x00016144, 0x01884080}, 389 {0x00016144, 0x01884080},
389 {0x00016148, 0x000080c0}, 390 {0x00016148, 0x000080c0},
390 {0x00016280, 0x01000015}, 391 {0x00016280, 0x01000015},
@@ -417,7 +418,7 @@ static const u32 ar9331_1p1_radio_core[][2] = {
417}; 418};
418 419
419static const u32 ar9331_1p1_soc_postamble[][5] = { 420static const u32 ar9331_1p1_soc_postamble[][5] = {
420 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 421 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
421 {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022}, 422 {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
422}; 423};
423 424
@@ -691,7 +692,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
691}; 692};
692 693
693static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = { 694static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
694 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 695 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
695 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 696 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
696 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52}, 697 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
697 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84}, 698 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -783,7 +784,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
783}; 784};
784 785
785static const u32 ar9331_1p1_mac_postamble[][5] = { 786static const u32 ar9331_1p1_mac_postamble[][5] = {
786 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 787 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
787 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, 788 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
788 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c}, 789 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
789 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38}, 790 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
@@ -973,26 +974,27 @@ static const u32 ar9331_1p1_mac_core[][2] = {
973 974
974static const u32 ar9331_common_rx_gain_1p1[][2] = { 975static const u32 ar9331_common_rx_gain_1p1[][2] = {
975 /* Addr allmodes */ 976 /* Addr allmodes */
976 {0x0000a000, 0x00010000}, 977 {0x00009e18, 0x05000000},
977 {0x0000a004, 0x00030002}, 978 {0x0000a000, 0x00060005},
978 {0x0000a008, 0x00050004}, 979 {0x0000a004, 0x00810080},
979 {0x0000a00c, 0x00810080}, 980 {0x0000a008, 0x00830082},
980 {0x0000a010, 0x00830082}, 981 {0x0000a00c, 0x00850084},
981 {0x0000a014, 0x01810180}, 982 {0x0000a010, 0x01820181},
982 {0x0000a018, 0x01830182}, 983 {0x0000a014, 0x01840183},
983 {0x0000a01c, 0x01850184}, 984 {0x0000a018, 0x01880185},
984 {0x0000a020, 0x01890188}, 985 {0x0000a01c, 0x018a0189},
985 {0x0000a024, 0x018b018a}, 986 {0x0000a020, 0x02850284},
986 {0x0000a028, 0x018d018c}, 987 {0x0000a024, 0x02890288},
987 {0x0000a02c, 0x01910190}, 988 {0x0000a028, 0x028b028a},
988 {0x0000a030, 0x01930192}, 989 {0x0000a02c, 0x03850384},
989 {0x0000a034, 0x01950194}, 990 {0x0000a030, 0x03890388},
990 {0x0000a038, 0x038a0196}, 991 {0x0000a034, 0x038b038a},
991 {0x0000a03c, 0x038c038b}, 992 {0x0000a038, 0x038d038c},
992 {0x0000a040, 0x0390038d}, 993 {0x0000a03c, 0x03910390},
993 {0x0000a044, 0x03920391}, 994 {0x0000a040, 0x03930392},
994 {0x0000a048, 0x03940393}, 995 {0x0000a044, 0x03950394},
995 {0x0000a04c, 0x03960395}, 996 {0x0000a048, 0x00000396},
997 {0x0000a04c, 0x00000000},
996 {0x0000a050, 0x00000000}, 998 {0x0000a050, 0x00000000},
997 {0x0000a054, 0x00000000}, 999 {0x0000a054, 0x00000000},
998 {0x0000a058, 0x00000000}, 1000 {0x0000a058, 0x00000000},
@@ -1005,15 +1007,15 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
1005 {0x0000a074, 0x00000000}, 1007 {0x0000a074, 0x00000000},
1006 {0x0000a078, 0x00000000}, 1008 {0x0000a078, 0x00000000},
1007 {0x0000a07c, 0x00000000}, 1009 {0x0000a07c, 0x00000000},
1008 {0x0000a080, 0x22222229}, 1010 {0x0000a080, 0x28282828},
1009 {0x0000a084, 0x1d1d1d1d}, 1011 {0x0000a084, 0x28282828},
1010 {0x0000a088, 0x1d1d1d1d}, 1012 {0x0000a088, 0x28282828},
1011 {0x0000a08c, 0x1d1d1d1d}, 1013 {0x0000a08c, 0x28282828},
1012 {0x0000a090, 0x171d1d1d}, 1014 {0x0000a090, 0x28282828},
1013 {0x0000a094, 0x11111717}, 1015 {0x0000a094, 0x24242428},
1014 {0x0000a098, 0x00030311}, 1016 {0x0000a098, 0x171e1e1e},
1015 {0x0000a09c, 0x00000000}, 1017 {0x0000a09c, 0x02020b0b},
1016 {0x0000a0a0, 0x00000000}, 1018 {0x0000a0a0, 0x02020202},
1017 {0x0000a0a4, 0x00000000}, 1019 {0x0000a0a4, 0x00000000},
1018 {0x0000a0a8, 0x00000000}, 1020 {0x0000a0a8, 0x00000000},
1019 {0x0000a0ac, 0x00000000}, 1021 {0x0000a0ac, 0x00000000},
@@ -1021,27 +1023,27 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
1021 {0x0000a0b4, 0x00000000}, 1023 {0x0000a0b4, 0x00000000},
1022 {0x0000a0b8, 0x00000000}, 1024 {0x0000a0b8, 0x00000000},
1023 {0x0000a0bc, 0x00000000}, 1025 {0x0000a0bc, 0x00000000},
1024 {0x0000a0c0, 0x001f0000}, 1026 {0x0000a0c0, 0x22072208},
1025 {0x0000a0c4, 0x01000101}, 1027 {0x0000a0c4, 0x22052206},
1026 {0x0000a0c8, 0x011e011f}, 1028 {0x0000a0c8, 0x22032204},
1027 {0x0000a0cc, 0x011c011d}, 1029 {0x0000a0cc, 0x22012202},
1028 {0x0000a0d0, 0x02030204}, 1030 {0x0000a0d0, 0x221f2200},
1029 {0x0000a0d4, 0x02010202}, 1031 {0x0000a0d4, 0x221d221e},
1030 {0x0000a0d8, 0x021f0200}, 1032 {0x0000a0d8, 0x33023303},
1031 {0x0000a0dc, 0x0302021e}, 1033 {0x0000a0dc, 0x33003301},
1032 {0x0000a0e0, 0x03000301}, 1034 {0x0000a0e0, 0x331e331f},
1033 {0x0000a0e4, 0x031e031f}, 1035 {0x0000a0e4, 0x4402331d},
1034 {0x0000a0e8, 0x0402031d}, 1036 {0x0000a0e8, 0x44004401},
1035 {0x0000a0ec, 0x04000401}, 1037 {0x0000a0ec, 0x441e441f},
1036 {0x0000a0f0, 0x041e041f}, 1038 {0x0000a0f0, 0x55025503},
1037 {0x0000a0f4, 0x0502041d}, 1039 {0x0000a0f4, 0x55005501},
1038 {0x0000a0f8, 0x05000501}, 1040 {0x0000a0f8, 0x551e551f},
1039 {0x0000a0fc, 0x051e051f}, 1041 {0x0000a0fc, 0x6602551d},
1040 {0x0000a100, 0x06010602}, 1042 {0x0000a100, 0x66006601},
1041 {0x0000a104, 0x061f0600}, 1043 {0x0000a104, 0x661e661f},
1042 {0x0000a108, 0x061d061e}, 1044 {0x0000a108, 0x7703661d},
1043 {0x0000a10c, 0x07020703}, 1045 {0x0000a10c, 0x77017702},
1044 {0x0000a110, 0x07000701}, 1046 {0x0000a110, 0x00007700},
1045 {0x0000a114, 0x00000000}, 1047 {0x0000a114, 0x00000000},
1046 {0x0000a118, 0x00000000}, 1048 {0x0000a118, 0x00000000},
1047 {0x0000a11c, 0x00000000}, 1049 {0x0000a11c, 0x00000000},
@@ -1054,26 +1056,26 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
1054 {0x0000a138, 0x00000000}, 1056 {0x0000a138, 0x00000000},
1055 {0x0000a13c, 0x00000000}, 1057 {0x0000a13c, 0x00000000},
1056 {0x0000a140, 0x001f0000}, 1058 {0x0000a140, 0x001f0000},
1057 {0x0000a144, 0x01000101}, 1059 {0x0000a144, 0x111f1100},
1058 {0x0000a148, 0x011e011f}, 1060 {0x0000a148, 0x111d111e},
1059 {0x0000a14c, 0x011c011d}, 1061 {0x0000a14c, 0x111b111c},
1060 {0x0000a150, 0x02030204}, 1062 {0x0000a150, 0x22032204},
1061 {0x0000a154, 0x02010202}, 1063 {0x0000a154, 0x22012202},
1062 {0x0000a158, 0x021f0200}, 1064 {0x0000a158, 0x221f2200},
1063 {0x0000a15c, 0x0302021e}, 1065 {0x0000a15c, 0x221d221e},
1064 {0x0000a160, 0x03000301}, 1066 {0x0000a160, 0x33013302},
1065 {0x0000a164, 0x031e031f}, 1067 {0x0000a164, 0x331f3300},
1066 {0x0000a168, 0x0402031d}, 1068 {0x0000a168, 0x4402331e},
1067 {0x0000a16c, 0x04000401}, 1069 {0x0000a16c, 0x44004401},
1068 {0x0000a170, 0x041e041f}, 1070 {0x0000a170, 0x441e441f},
1069 {0x0000a174, 0x0502041d}, 1071 {0x0000a174, 0x55015502},
1070 {0x0000a178, 0x05000501}, 1072 {0x0000a178, 0x551f5500},
1071 {0x0000a17c, 0x051e051f}, 1073 {0x0000a17c, 0x6602551e},
1072 {0x0000a180, 0x06010602}, 1074 {0x0000a180, 0x66006601},
1073 {0x0000a184, 0x061f0600}, 1075 {0x0000a184, 0x661e661f},
1074 {0x0000a188, 0x061d061e}, 1076 {0x0000a188, 0x7703661d},
1075 {0x0000a18c, 0x07020703}, 1077 {0x0000a18c, 0x77017702},
1076 {0x0000a190, 0x07000701}, 1078 {0x0000a190, 0x00007700},
1077 {0x0000a194, 0x00000000}, 1079 {0x0000a194, 0x00000000},
1078 {0x0000a198, 0x00000000}, 1080 {0x0000a198, 0x00000000},
1079 {0x0000a19c, 0x00000000}, 1081 {0x0000a19c, 0x00000000},
@@ -1100,14 +1102,14 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
1100 {0x0000a1f0, 0x00000396}, 1102 {0x0000a1f0, 0x00000396},
1101 {0x0000a1f4, 0x00000396}, 1103 {0x0000a1f4, 0x00000396},
1102 {0x0000a1f8, 0x00000396}, 1104 {0x0000a1f8, 0x00000396},
1103 {0x0000a1fc, 0x00000196}, 1105 {0x0000a1fc, 0x00000296},
1104}; 1106};
1105 1107
1106static const u32 ar9331_common_tx_gain_offset1_1[][1] = { 1108static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
1107 {0}, 1109 {0x00000000},
1108 {3}, 1110 {0x00000003},
1109 {0}, 1111 {0x00000000},
1110 {0}, 1112 {0x00000000},
1111}; 1113};
1112 1114
1113static const u32 ar9331_1p1_chansel_xtal_25M[] = { 1115static const u32 ar9331_1p1_chansel_xtal_25M[] = {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index abe05ec85d50..7db1890448f2 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1468,6 +1468,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1468 return false; 1468 return false;
1469 1469
1470 ah->chip_fullsleep = false; 1470 ah->chip_fullsleep = false;
1471
1472 if (AR_SREV_9330(ah))
1473 ar9003_hw_internal_regulator_apply(ah);
1471 ath9k_hw_init_pll(ah, chan); 1474 ath9k_hw_init_pll(ah, chan);
1472 ath9k_hw_set_rfmode(ah, chan); 1475 ath9k_hw_set_rfmode(ah, chan);
1473 1476
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dfa78e8b6470..4de4473776ac 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -239,7 +239,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
239{ 239{
240 struct ath_hw *ah = sc->sc_ah; 240 struct ath_hw *ah = sc->sc_ah;
241 struct ath_common *common = ath9k_hw_common(ah); 241 struct ath_common *common = ath9k_hw_common(ah);
242 bool ret; 242 bool ret = true;
243 243
244 ieee80211_stop_queues(sc->hw); 244 ieee80211_stop_queues(sc->hw);
245 245
@@ -250,11 +250,12 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
250 ath9k_debug_samp_bb_mac(sc); 250 ath9k_debug_samp_bb_mac(sc);
251 ath9k_hw_disable_interrupts(ah); 251 ath9k_hw_disable_interrupts(ah);
252 252
253 ret = ath_drain_all_txq(sc, retry_tx);
254
255 if (!ath_stoprecv(sc)) 253 if (!ath_stoprecv(sc))
256 ret = false; 254 ret = false;
257 255
256 if (!ath_drain_all_txq(sc, retry_tx))
257 ret = false;
258
258 if (!flush) { 259 if (!flush) {
259 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 260 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
260 ath_rx_tasklet(sc, 1, true); 261 ath_rx_tasklet(sc, 1, true);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 23eaa1b26ebe..d59dd01d6cde 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -64,7 +64,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
64static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 64static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
65 struct ath_txq *txq, 65 struct ath_txq *txq,
66 struct ath_atx_tid *tid, 66 struct ath_atx_tid *tid,
67 struct sk_buff *skb); 67 struct sk_buff *skb,
68 bool dequeue);
68 69
69enum { 70enum {
70 MCS_HT20, 71 MCS_HT20,
@@ -811,7 +812,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
811 fi = get_frame_info(skb); 812 fi = get_frame_info(skb);
812 bf = fi->bf; 813 bf = fi->bf;
813 if (!fi->bf) 814 if (!fi->bf)
814 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 815 bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
815 816
816 if (!bf) 817 if (!bf)
817 continue; 818 continue;
@@ -1726,7 +1727,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1726 return; 1727 return;
1727 } 1728 }
1728 1729
1729 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1730 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
1730 if (!bf) 1731 if (!bf)
1731 return; 1732 return;
1732 1733
@@ -1753,7 +1754,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1753 1754
1754 bf = fi->bf; 1755 bf = fi->bf;
1755 if (!bf) 1756 if (!bf)
1756 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 1757 bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
1757 1758
1758 if (!bf) 1759 if (!bf)
1759 return; 1760 return;
@@ -1814,7 +1815,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1814static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 1815static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1815 struct ath_txq *txq, 1816 struct ath_txq *txq,
1816 struct ath_atx_tid *tid, 1817 struct ath_atx_tid *tid,
1817 struct sk_buff *skb) 1818 struct sk_buff *skb,
1819 bool dequeue)
1818{ 1820{
1819 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1821 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1820 struct ath_frame_info *fi = get_frame_info(skb); 1822 struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1865,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1863 return bf; 1865 return bf;
1864 1866
1865error: 1867error:
1868 if (dequeue)
1869 __skb_unlink(skb, &tid->buf_q);
1866 dev_kfree_skb_any(skb); 1870 dev_kfree_skb_any(skb);
1867 return NULL; 1871 return NULL;
1868} 1872}
@@ -1893,7 +1897,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1893 */ 1897 */
1894 ath_tx_send_ampdu(sc, tid, skb, txctl); 1898 ath_tx_send_ampdu(sc, tid, skb, txctl);
1895 } else { 1899 } else {
1896 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1900 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
1897 if (!bf) 1901 if (!bf)
1898 return; 1902 return;
1899 1903
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 67c13af6f206..c06b6cb5c91e 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -877,6 +877,10 @@ struct b43_wl {
877 * from the mac80211 subsystem. */ 877 * from the mac80211 subsystem. */
878 u16 mac80211_initially_registered_queues; 878 u16 mac80211_initially_registered_queues;
879 879
880 /* Set this if we call ieee80211_register_hw() and check if we call
881 * ieee80211_unregister_hw(). */
882 bool hw_registred;
883
880 /* We can only have one operating interface (802.11 core) 884 /* We can only have one operating interface (802.11 core)
881 * at a time. General information about this interface follows. 885 * at a time. General information about this interface follows.
882 */ 886 */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5a39b226b2e3..1b988f26bdf1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2437,6 +2437,7 @@ start_ieee80211:
2437 err = ieee80211_register_hw(wl->hw); 2437 err = ieee80211_register_hw(wl->hw);
2438 if (err) 2438 if (err)
2439 goto err_one_core_detach; 2439 goto err_one_core_detach;
2440 wl->hw_registred = true;
2440 b43_leds_register(wl->current_dev); 2441 b43_leds_register(wl->current_dev);
2441 goto out; 2442 goto out;
2442 2443
@@ -3766,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
3766 if (prev_status >= B43_STAT_STARTED) { 3767 if (prev_status >= B43_STAT_STARTED) {
3767 err = b43_wireless_core_start(up_dev); 3768 err = b43_wireless_core_start(up_dev);
3768 if (err) { 3769 if (err) {
3769 b43err(wl, "Fatal: Coult not start device for " 3770 b43err(wl, "Fatal: Could not start device for "
3770 "selected %s-GHz band\n", 3771 "selected %s-GHz band\n",
3771 band_to_string(chan->band)); 3772 band_to_string(chan->band));
3772 b43_wireless_core_exit(up_dev); 3773 b43_wireless_core_exit(up_dev);
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5299 5300
5300 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; 5301 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
5301 wl->mac80211_initially_registered_queues = hw->queues; 5302 wl->mac80211_initially_registered_queues = hw->queues;
5303 wl->hw_registred = false;
5302 hw->max_rates = 2; 5304 hw->max_rates = 2;
5303 SET_IEEE80211_DEV(hw, dev->dev); 5305 SET_IEEE80211_DEV(hw, dev->dev);
5304 if (is_valid_ether_addr(sprom->et1mac)) 5306 if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core)
5370 * as the ieee80211 unreg will destroy the workqueue. */ 5372 * as the ieee80211 unreg will destroy the workqueue. */
5371 cancel_work_sync(&wldev->restart_work); 5373 cancel_work_sync(&wldev->restart_work);
5372 5374
5373 /* Restore the queues count before unregistering, because firmware detect 5375 B43_WARN_ON(!wl);
5374 * might have modified it. Restoring is important, so the networking 5376 if (wl->current_dev == wldev && wl->hw_registred) {
5375 * stack can properly free resources. */ 5377 /* Restore the queues count before unregistering, because firmware detect
5376 wl->hw->queues = wl->mac80211_initially_registered_queues; 5378 * might have modified it. Restoring is important, so the networking
5377 b43_leds_stop(wldev); 5379 * stack can properly free resources. */
5378 ieee80211_unregister_hw(wl->hw); 5380 wl->hw->queues = wl->mac80211_initially_registered_queues;
5381 b43_leds_stop(wldev);
5382 ieee80211_unregister_hw(wl->hw);
5383 }
5379 5384
5380 b43_one_core_detach(wldev->dev); 5385 b43_one_core_detach(wldev->dev);
5381 5386
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5446 cancel_work_sync(&wldev->restart_work); 5451 cancel_work_sync(&wldev->restart_work);
5447 5452
5448 B43_WARN_ON(!wl); 5453 B43_WARN_ON(!wl);
5449 if (wl->current_dev == wldev) { 5454 if (wl->current_dev == wldev && wl->hw_registred) {
5450 /* Restore the queues count before unregistering, because firmware detect 5455 /* Restore the queues count before unregistering, because firmware detect
5451 * might have modified it. Restoring is important, so the networking 5456 * might have modified it. Restoring is important, so the networking
5452 * stack can properly free resources. */ 5457 * stack can properly free resources. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index cd9c9bc186d9..eae691e2f7dd 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
2633 if (prev_status >= B43legacy_STAT_STARTED) { 2633 if (prev_status >= B43legacy_STAT_STARTED) {
2634 err = b43legacy_wireless_core_start(up_dev); 2634 err = b43legacy_wireless_core_start(up_dev);
2635 if (err) { 2635 if (err) {
2636 b43legacyerr(wl, "Fatal: Coult not start device for " 2636 b43legacyerr(wl, "Fatal: Could not start device for "
2637 "newly selected %s-PHY mode\n", 2637 "newly selected %s-PHY mode\n",
2638 phymode_to_string(new_mode)); 2638 phymode_to_string(new_mode));
2639 b43legacy_wireless_core_exit(up_dev); 2639 b43legacy_wireless_core_exit(up_dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e2480d196276..8e7e6928c936 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; 89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
90 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); 90 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
91 91
92 /* redirect, configure ane enable io for interrupt signal */ 92 /* redirect, configure and enable io for interrupt signal */
93 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; 93 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
94 if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH) 94 if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
95 data |= SDIO_SEPINT_ACT_HI; 95 data |= SDIO_SEPINT_ACT_HI;
96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); 96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
97 97
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index c5a34ffe6459..a299d42da8e7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -28,6 +28,7 @@
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/firmware.h> 29#include <linux/firmware.h>
30#include <linux/usb.h> 30#include <linux/usb.h>
31#include <linux/vmalloc.h>
31#include <net/cfg80211.h> 32#include <net/cfg80211.h>
32 33
33#include <defs.h> 34#include <defs.h>
@@ -1239,7 +1240,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1239 return -EINVAL; 1240 return -EINVAL;
1240 } 1241 }
1241 1242
1242 devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */ 1243 devinfo->image = vmalloc(fw->size); /* plus nvram */
1243 if (!devinfo->image) 1244 if (!devinfo->image)
1244 return -ENOMEM; 1245 return -ENOMEM;
1245 1246
@@ -1603,7 +1604,7 @@ static struct usb_driver brcmf_usbdrvr = {
1603void brcmf_usb_exit(void) 1604void brcmf_usb_exit(void)
1604{ 1605{
1605 usb_deregister(&brcmf_usbdrvr); 1606 usb_deregister(&brcmf_usbdrvr);
1606 kfree(g_image.data); 1607 vfree(g_image.data);
1607 g_image.data = NULL; 1608 g_image.data = NULL;
1608 g_image.len = 0; 1609 g_image.len = 0;
1609} 1610}
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9cfae0c08707..95aa8e1683ec 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1903 netif_stop_queue(priv->net_dev); 1903 netif_stop_queue(priv->net_dev);
1904} 1904}
1905 1905
1906/* Called by register_netdev() */
1907static int ipw2100_net_init(struct net_device *dev)
1908{
1909 struct ipw2100_priv *priv = libipw_priv(dev);
1910
1911 return ipw2100_up(priv, 1);
1912}
1913
1914static int ipw2100_wdev_init(struct net_device *dev) 1906static int ipw2100_wdev_init(struct net_device *dev)
1915{ 1907{
1916 struct ipw2100_priv *priv = libipw_priv(dev); 1908 struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6087 .ndo_stop = ipw2100_close, 6079 .ndo_stop = ipw2100_close,
6088 .ndo_start_xmit = libipw_xmit, 6080 .ndo_start_xmit = libipw_xmit,
6089 .ndo_change_mtu = libipw_change_mtu, 6081 .ndo_change_mtu = libipw_change_mtu,
6090 .ndo_init = ipw2100_net_init,
6091 .ndo_tx_timeout = ipw2100_tx_timeout, 6082 .ndo_tx_timeout = ipw2100_tx_timeout,
6092 .ndo_set_mac_address = ipw2100_set_address, 6083 .ndo_set_mac_address = ipw2100_set_address,
6093 .ndo_validate_addr = eth_validate_addr, 6084 .ndo_validate_addr = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6329 printk(KERN_INFO DRV_NAME 6320 printk(KERN_INFO DRV_NAME
6330 ": Detected Intel PRO/Wireless 2100 Network Connection\n"); 6321 ": Detected Intel PRO/Wireless 2100 Network Connection\n");
6331 6322
6323 err = ipw2100_up(priv, 1);
6324 if (err)
6325 goto fail;
6326
6332 err = ipw2100_wdev_init(dev); 6327 err = ipw2100_wdev_init(dev);
6333 if (err) 6328 if (err)
6334 goto fail; 6329 goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6338 * network device we would call ipw2100_up. This introduced a race 6333 * network device we would call ipw2100_up. This introduced a race
6339 * condition with newer hotplug configurations (network was coming 6334 * condition with newer hotplug configurations (network was coming
6340 * up and making calls before the device was initialized). 6335 * up and making calls before the device was initialized).
6341 * 6336 */
6342 * If we called ipw2100_up before we registered the device, then the
6343 * device name wasn't registered. So, we instead use the net_dev->init
6344 * member to call a function that then just turns and calls ipw2100_up.
6345 * net_dev->init is called after name allocation but before the
6346 * notifier chain is called */
6347 err = register_netdev(dev); 6337 err = register_netdev(dev);
6348 if (err) { 6338 if (err) {
6349 printk(KERN_WARNING DRV_NAME 6339 printk(KERN_WARNING DRV_NAME
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index db6c6e528022..2463c0626438 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -137,11 +137,3 @@ config IWLWIFI_EXPERIMENTAL_MFP
137 even if the microcode doesn't advertise it. 137 even if the microcode doesn't advertise it.
138 138
139 Say Y only if you want to experiment with MFP. 139 Say Y only if you want to experiment with MFP.
140
141config IWLWIFI_UCODE16
142 bool "support uCode 16.0"
143 depends on IWLWIFI
144 help
145 This option enables support for uCode version 16.0.
146
147 Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 406f297a9a56..d615eacbf050 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -18,7 +18,6 @@ iwlwifi-objs += iwl-notif-wait.o
18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o 18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
19 19
20 20
21iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
22iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 21iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
23iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 22iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
24iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o 23iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 7f793417c787..8133105ac645 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -79,7 +79,7 @@ static const struct iwl_base_params iwl2000_base_params = {
79 .chain_noise_scale = 1000, 79 .chain_noise_scale = 1000,
80 .wd_timeout = IWL_DEF_WD_TIMEOUT, 80 .wd_timeout = IWL_DEF_WD_TIMEOUT,
81 .max_event_log_size = 512, 81 .max_event_log_size = 512,
82 .shadow_reg_enable = true, 82 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
83 .hd_v2 = true, 83 .hd_v2 = true,
84}; 84};
85 85
@@ -97,7 +97,7 @@ static const struct iwl_base_params iwl2030_base_params = {
97 .chain_noise_scale = 1000, 97 .chain_noise_scale = 1000,
98 .wd_timeout = IWL_LONG_WD_TIMEOUT, 98 .wd_timeout = IWL_LONG_WD_TIMEOUT,
99 .max_event_log_size = 512, 99 .max_event_log_size = 512,
100 .shadow_reg_enable = true, 100 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
101 .hd_v2 = true, 101 .hd_v2 = true,
102}; 102};
103 103
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 381b02cf339c..e5e8ada4aaf6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -35,17 +35,20 @@
35#define IWL6000_UCODE_API_MAX 6 35#define IWL6000_UCODE_API_MAX 6
36#define IWL6050_UCODE_API_MAX 5 36#define IWL6050_UCODE_API_MAX 5
37#define IWL6000G2_UCODE_API_MAX 6 37#define IWL6000G2_UCODE_API_MAX 6
38#define IWL6035_UCODE_API_MAX 6
38 39
39/* Oldest version we won't warn about */ 40/* Oldest version we won't warn about */
40#define IWL6000_UCODE_API_OK 4 41#define IWL6000_UCODE_API_OK 4
41#define IWL6000G2_UCODE_API_OK 5 42#define IWL6000G2_UCODE_API_OK 5
42#define IWL6050_UCODE_API_OK 5 43#define IWL6050_UCODE_API_OK 5
43#define IWL6000G2B_UCODE_API_OK 6 44#define IWL6000G2B_UCODE_API_OK 6
45#define IWL6035_UCODE_API_OK 6
44 46
45/* Lowest firmware API version supported */ 47/* Lowest firmware API version supported */
46#define IWL6000_UCODE_API_MIN 4 48#define IWL6000_UCODE_API_MIN 4
47#define IWL6050_UCODE_API_MIN 4 49#define IWL6050_UCODE_API_MIN 4
48#define IWL6000G2_UCODE_API_MIN 4 50#define IWL6000G2_UCODE_API_MIN 5
51#define IWL6035_UCODE_API_MIN 6
49 52
50/* EEPROM versions */ 53/* EEPROM versions */
51#define EEPROM_6000_TX_POWER_VERSION (4) 54#define EEPROM_6000_TX_POWER_VERSION (4)
@@ -86,7 +89,7 @@ static const struct iwl_base_params iwl6000_base_params = {
86 .chain_noise_scale = 1000, 89 .chain_noise_scale = 1000,
87 .wd_timeout = IWL_DEF_WD_TIMEOUT, 90 .wd_timeout = IWL_DEF_WD_TIMEOUT,
88 .max_event_log_size = 512, 91 .max_event_log_size = 512,
89 .shadow_reg_enable = true, 92 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
90}; 93};
91 94
92static const struct iwl_base_params iwl6050_base_params = { 95static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +105,7 @@ static const struct iwl_base_params iwl6050_base_params = {
102 .chain_noise_scale = 1500, 105 .chain_noise_scale = 1500,
103 .wd_timeout = IWL_DEF_WD_TIMEOUT, 106 .wd_timeout = IWL_DEF_WD_TIMEOUT,
104 .max_event_log_size = 1024, 107 .max_event_log_size = 1024,
105 .shadow_reg_enable = true, 108 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
106}; 109};
107 110
108static const struct iwl_base_params iwl6000_g2_base_params = { 111static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,7 +121,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
118 .chain_noise_scale = 1000, 121 .chain_noise_scale = 1000,
119 .wd_timeout = IWL_LONG_WD_TIMEOUT, 122 .wd_timeout = IWL_LONG_WD_TIMEOUT,
120 .max_event_log_size = 512, 123 .max_event_log_size = 512,
121 .shadow_reg_enable = true, 124 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
122}; 125};
123 126
124static const struct iwl_ht_params iwl6000_ht_params = { 127static const struct iwl_ht_params iwl6000_ht_params = {
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
227 IWL_DEVICE_6030, 230 IWL_DEVICE_6030,
228}; 231};
229 232
233#define IWL_DEVICE_6035 \
234 .fw_name_pre = IWL6030_FW_PRE, \
235 .ucode_api_max = IWL6035_UCODE_API_MAX, \
236 .ucode_api_ok = IWL6035_UCODE_API_OK, \
237 .ucode_api_min = IWL6035_UCODE_API_MIN, \
238 .device_family = IWL_DEVICE_FAMILY_6030, \
239 .max_inst_size = IWL60_RTC_INST_SIZE, \
240 .max_data_size = IWL60_RTC_DATA_SIZE, \
241 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
242 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
243 .base_params = &iwl6000_g2_base_params, \
244 .bt_params = &iwl6000_bt_params, \
245 .need_temp_offset_calib = true, \
246 .led_mode = IWL_LED_RF_STATE, \
247 .adv_pm = true
248
230const struct iwl_cfg iwl6035_2agn_cfg = { 249const struct iwl_cfg iwl6035_2agn_cfg = {
231 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 250 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
232 IWL_DEVICE_6030, 251 IWL_DEVICE_6035,
233 .ht_params = &iwl6000_ht_params, 252 .ht_params = &iwl6000_ht_params,
234}; 253};
235 254
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 51e1a69ffdda..8cebd7c363fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -884,6 +884,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
884 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) || 884 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
885 (priv->bt_full_concurrent != full_concurrent)) { 885 (priv->bt_full_concurrent != full_concurrent)) {
886 priv->bt_full_concurrent = full_concurrent; 886 priv->bt_full_concurrent = full_concurrent;
887 priv->last_bt_traffic_load = priv->bt_traffic_load;
887 888
888 /* Update uCode's rate table. */ 889 /* Update uCode's rate table. */
889 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 890 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index b31584e87bc7..eb6a8eaf42fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -772,7 +772,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
772 ~IWL_STA_DRIVER_ACTIVE; 772 ~IWL_STA_DRIVER_ACTIVE;
773 priv->stations[i].used &= 773 priv->stations[i].used &=
774 ~IWL_STA_UCODE_INPROGRESS; 774 ~IWL_STA_UCODE_INPROGRESS;
775 spin_unlock_bh(&priv->sta_lock); 775 continue;
776 } 776 }
777 /* 777 /*
778 * Rate scaling has already been initialized, send 778 * Rate scaling has already been initialized, send
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1267 key_flags |= STA_KEY_MULTICAST_MSK; 1267 key_flags |= STA_KEY_MULTICAST_MSK;
1268 1268
1269 sta_cmd.key.key_flags = key_flags; 1269 sta_cmd.key.key_flags = key_flags;
1270 sta_cmd.key.key_offset = WEP_INVALID_OFFSET; 1270 sta_cmd.key.key_offset = keyconf->hw_key_idx;
1271 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; 1271 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
1272 sta_cmd.mode = STA_CONTROL_MODIFY_MSK; 1272 sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
1273 1273
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 3c72bad0ae56..fac67a526a30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -657,17 +657,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
657 return -EINVAL; 657 return -EINVAL;
658} 658}
659 659
660static int alloc_pci_desc(struct iwl_drv *drv, 660static int iwl_alloc_ucode(struct iwl_drv *drv,
661 struct iwl_firmware_pieces *pieces, 661 struct iwl_firmware_pieces *pieces,
662 enum iwl_ucode_type type) 662 enum iwl_ucode_type type)
663{ 663{
664 int i; 664 int i;
665 for (i = 0; 665 for (i = 0;
666 i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i); 666 i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
667 i++) 667 i++)
668 if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]), 668 if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
669 get_sec(pieces, type, i))) 669 get_sec(pieces, type, i)))
670 return -1; 670 return -ENOMEM;
671 return 0; 671 return 0;
672} 672}
673 673
@@ -825,8 +825,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
825 * 1) unmodified from disk 825 * 1) unmodified from disk
826 * 2) backup cache for save/restore during power-downs */ 826 * 2) backup cache for save/restore during power-downs */
827 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) 827 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
828 if (alloc_pci_desc(drv, &pieces, i)) 828 if (iwl_alloc_ucode(drv, &pieces, i))
829 goto err_pci_alloc; 829 goto out_free_fw;
830 830
831 /* Now that we can no longer fail, copy information */ 831 /* Now that we can no longer fail, copy information */
832 832
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
861 861
862 /* We have our copies now, allow OS release its copies */ 862 /* We have our copies now, allow OS release its copies */
863 release_firmware(ucode_raw); 863 release_firmware(ucode_raw);
864 complete(&drv->request_firmware_complete);
865 864
866 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); 865 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
867 866
868 if (!drv->op_mode) 867 if (!drv->op_mode)
869 goto out_unbind; 868 goto out_unbind;
870 869
870 /*
871 * Complete the firmware request last so that
872 * a driver unbind (stop) doesn't run while we
873 * are doing the start() above.
874 */
875 complete(&drv->request_firmware_complete);
871 return; 876 return;
872 877
873 try_again: 878 try_again:
@@ -877,7 +882,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
877 goto out_unbind; 882 goto out_unbind;
878 return; 883 return;
879 884
880 err_pci_alloc: 885 out_free_fw:
881 IWL_ERR(drv, "failed to allocate pci memory\n"); 886 IWL_ERR(drv, "failed to allocate pci memory\n");
882 iwl_dealloc_ucode(drv); 887 iwl_dealloc_ucode(drv);
883 release_firmware(ucode_raw); 888 release_firmware(ucode_raw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 50c58911e718..b8e2b223ac36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
568 * iwl_get_max_txpower_avg - get the highest tx power from all chains. 568 * iwl_get_max_txpower_avg - get the highest tx power from all chains.
569 * find the highest tx power from all chains for the channel 569 * find the highest tx power from all chains for the channel
570 */ 570 */
571static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg, 571static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, 572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
573 int element, s8 *max_txpower_in_half_dbm) 573 int element, s8 *max_txpower_in_half_dbm)
574{ 574{
575 s8 max_txpower_avg = 0; /* (dBm) */ 575 s8 max_txpower_avg = 0; /* (dBm) */
576 576
577 /* Take the highest tx power from any valid chains */ 577 /* Take the highest tx power from any valid chains */
578 if ((cfg->valid_tx_ant & ANT_A) && 578 if ((priv->hw_params.valid_tx_ant & ANT_A) &&
579 (enhanced_txpower[element].chain_a_max > max_txpower_avg)) 579 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
580 max_txpower_avg = enhanced_txpower[element].chain_a_max; 580 max_txpower_avg = enhanced_txpower[element].chain_a_max;
581 if ((cfg->valid_tx_ant & ANT_B) && 581 if ((priv->hw_params.valid_tx_ant & ANT_B) &&
582 (enhanced_txpower[element].chain_b_max > max_txpower_avg)) 582 (enhanced_txpower[element].chain_b_max > max_txpower_avg))
583 max_txpower_avg = enhanced_txpower[element].chain_b_max; 583 max_txpower_avg = enhanced_txpower[element].chain_b_max;
584 if ((cfg->valid_tx_ant & ANT_C) && 584 if ((priv->hw_params.valid_tx_ant & ANT_C) &&
585 (enhanced_txpower[element].chain_c_max > max_txpower_avg)) 585 (enhanced_txpower[element].chain_c_max > max_txpower_avg))
586 max_txpower_avg = enhanced_txpower[element].chain_c_max; 586 max_txpower_avg = enhanced_txpower[element].chain_c_max;
587 if (((cfg->valid_tx_ant == ANT_AB) | 587 if (((priv->hw_params.valid_tx_ant == ANT_AB) |
588 (cfg->valid_tx_ant == ANT_BC) | 588 (priv->hw_params.valid_tx_ant == ANT_BC) |
589 (cfg->valid_tx_ant == ANT_AC)) && 589 (priv->hw_params.valid_tx_ant == ANT_AC)) &&
590 (enhanced_txpower[element].mimo2_max > max_txpower_avg)) 590 (enhanced_txpower[element].mimo2_max > max_txpower_avg))
591 max_txpower_avg = enhanced_txpower[element].mimo2_max; 591 max_txpower_avg = enhanced_txpower[element].mimo2_max;
592 if ((cfg->valid_tx_ant == ANT_ABC) && 592 if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
593 (enhanced_txpower[element].mimo3_max > max_txpower_avg)) 593 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
594 max_txpower_avg = enhanced_txpower[element].mimo3_max; 594 max_txpower_avg = enhanced_txpower[element].mimo3_max;
595 595
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
691 ((txp->delta_20_in_40 & 0xf0) >> 4), 691 ((txp->delta_20_in_40 & 0xf0) >> 4),
692 (txp->delta_20_in_40 & 0x0f)); 692 (txp->delta_20_in_40 & 0x0f));
693 693
694 max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx, 694 max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
695 &max_txp_avg_halfdbm); 695 &max_txp_avg_halfdbm);
696 696
697 /* 697 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index ab2f4d7500a4..3ee23134c02b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
199 WIPHY_FLAG_DISABLE_BEACON_HINTS | 199 WIPHY_FLAG_DISABLE_BEACON_HINTS |
200 WIPHY_FLAG_IBSS_RSN; 200 WIPHY_FLAG_IBSS_RSN;
201 201
202#ifdef CONFIG_PM_SLEEP
202 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 203 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
203 priv->trans->ops->wowlan_suspend && 204 priv->trans->ops->wowlan_suspend &&
204 device_can_wakeup(priv->trans->dev)) { 205 device_can_wakeup(priv->trans->dev)) {
@@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
217 hw->wiphy->wowlan.pattern_max_len = 218 hw->wiphy->wowlan.pattern_max_len =
218 IWLAGN_WOWLAN_MAX_PATTERN_LEN; 219 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
219 } 220 }
221#endif
220 222
221 if (iwlwifi_mod_params.power_save) 223 if (iwlwifi_mod_params.power_save)
222 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 224 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
249 ret = ieee80211_register_hw(priv->hw); 251 ret = ieee80211_register_hw(priv->hw);
250 if (ret) { 252 if (ret) {
251 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 253 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
254 iwl_leds_exit(priv);
252 return ret; 255 return ret;
253 } 256 }
254 priv->mac80211_registered = 1; 257 priv->mac80211_registered = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644
index f166955340fe..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ /dev/null
@@ -1,288 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/slab.h>
65#include <linux/string.h>
66
67#include "iwl-debug.h"
68#include "iwl-dev.h"
69
70#include "iwl-phy-db.h"
71
72#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
73
74struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
75{
76 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
77 GFP_KERNEL);
78
79 if (!phy_db)
80 return phy_db;
81
82 phy_db->dev = dev;
83
84 /* TODO: add default values of the phy db. */
85 return phy_db;
86}
87
88/*
89 * get phy db section: returns a pointer to a phy db section specified by
90 * type and channel group id.
91 */
92static struct iwl_phy_db_entry *
93iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
94 enum iwl_phy_db_section_type type,
95 u16 chg_id)
96{
97 if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
98 return NULL;
99
100 switch (type) {
101 case IWL_PHY_DB_CFG:
102 return &phy_db->cfg;
103 case IWL_PHY_DB_CALIB_NCH:
104 return &phy_db->calib_nch;
105 case IWL_PHY_DB_CALIB_CH:
106 return &phy_db->calib_ch;
107 case IWL_PHY_DB_CALIB_CHG_PAPD:
108 if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
109 return NULL;
110 return &phy_db->calib_ch_group_papd[chg_id];
111 case IWL_PHY_DB_CALIB_CHG_TXP:
112 if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
113 return NULL;
114 return &phy_db->calib_ch_group_txp[chg_id];
115 default:
116 return NULL;
117 }
118 return NULL;
119}
120
121static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
122 enum iwl_phy_db_section_type type,
123 u16 chg_id)
124{
125 struct iwl_phy_db_entry *entry =
126 iwl_phy_db_get_section(phy_db, type, chg_id);
127 if (!entry)
128 return;
129
130 kfree(entry->data);
131 entry->data = NULL;
132 entry->size = 0;
133}
134
135void iwl_phy_db_free(struct iwl_phy_db *phy_db)
136{
137 int i;
138
139 if (!phy_db)
140 return;
141
142 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
143 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
144 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
145 for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
146 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
147 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
148 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
149
150 kfree(phy_db);
151}
152
153int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
154 enum iwl_phy_db_section_type type, u8 *data,
155 u16 size, gfp_t alloc_ctx)
156{
157 struct iwl_phy_db_entry *entry;
158 u16 chg_id = 0;
159
160 if (!phy_db)
161 return -EINVAL;
162
163 if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
164 type == IWL_PHY_DB_CALIB_CHG_TXP)
165 chg_id = le16_to_cpup((__le16 *)data);
166
167 entry = iwl_phy_db_get_section(phy_db, type, chg_id);
168 if (!entry)
169 return -EINVAL;
170
171 kfree(entry->data);
172 entry->data = kmemdup(data, size, alloc_ctx);
173 if (!entry->data) {
174 entry->size = 0;
175 return -ENOMEM;
176 }
177
178 entry->size = size;
179
180 if (type == IWL_PHY_DB_CALIB_CH) {
181 phy_db->channel_num = le32_to_cpup((__le32 *)data);
182 phy_db->channel_size =
183 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
184 }
185
186 return 0;
187}
188
189static int is_valid_channel(u16 ch_id)
190{
191 if (ch_id <= 14 ||
192 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
193 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
194 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
195 return 1;
196 return 0;
197}
198
199static u8 ch_id_to_ch_index(u16 ch_id)
200{
201 if (WARN_ON(!is_valid_channel(ch_id)))
202 return 0xff;
203
204 if (ch_id <= 14)
205 return ch_id - 1;
206 if (ch_id <= 64)
207 return (ch_id + 20) / 4;
208 if (ch_id <= 140)
209 return (ch_id - 12) / 4;
210 return (ch_id - 13) / 4;
211}
212
213
214static u16 channel_id_to_papd(u16 ch_id)
215{
216 if (WARN_ON(!is_valid_channel(ch_id)))
217 return 0xff;
218
219 if (1 <= ch_id && ch_id <= 14)
220 return 0;
221 if (36 <= ch_id && ch_id <= 64)
222 return 1;
223 if (100 <= ch_id && ch_id <= 140)
224 return 2;
225 return 3;
226}
227
228static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
229{
230 struct iwl_phy_db_chg_txp *txp_chg;
231 int i;
232 u8 ch_index = ch_id_to_ch_index(ch_id);
233 if (ch_index == 0xff)
234 return 0xff;
235
236 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
237 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
238 if (!txp_chg)
239 return 0xff;
240 /*
241 * Looking for the first channel group that its max channel is
242 * higher then wanted channel.
243 */
244 if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
245 return i;
246 }
247 return 0xff;
248}
249
250int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
251 enum iwl_phy_db_section_type type, u8 **data,
252 u16 *size, u16 ch_id)
253{
254 struct iwl_phy_db_entry *entry;
255 u32 channel_num;
256 u32 channel_size;
257 u16 ch_group_id = 0;
258 u16 index;
259
260 if (!phy_db)
261 return -EINVAL;
262
263 /* find wanted channel group */
264 if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
265 ch_group_id = channel_id_to_papd(ch_id);
266 else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
267 ch_group_id = channel_id_to_txp(phy_db, ch_id);
268
269 entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
270 if (!entry)
271 return -EINVAL;
272
273 if (type == IWL_PHY_DB_CALIB_CH) {
274 index = ch_id_to_ch_index(ch_id);
275 channel_num = phy_db->channel_num;
276 channel_size = phy_db->channel_size;
277 if (index >= channel_num) {
278 IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
279 return -EINVAL;
280 }
281 *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
282 *size = channel_size;
283 } else {
284 *data = entry->data;
285 *size = entry->size;
286 }
287 return 0;
288}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
deleted file mode 100644
index c34c6a9303ab..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ /dev/null
@@ -1,129 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_PHYDB_H__
65#define __IWL_PHYDB_H__
66
67#include <linux/types.h>
68
69#define IWL_NUM_PAPD_CH_GROUPS 4
70#define IWL_NUM_TXP_CH_GROUPS 8
71
72struct iwl_phy_db_entry {
73 u16 size;
74 u8 *data;
75};
76
77struct iwl_shared;
78
79/**
80 * struct iwl_phy_db - stores phy configuration and calibration data.
81 *
82 * @cfg: phy configuration.
83 * @calib_nch: non channel specific calibration data.
84 * @calib_ch: channel specific calibration data.
85 * @calib_ch_group_papd: calibration data related to papd channel group.
86 * @calib_ch_group_txp: calibration data related to tx power chanel group.
87 */
88struct iwl_phy_db {
89 struct iwl_phy_db_entry cfg;
90 struct iwl_phy_db_entry calib_nch;
91 struct iwl_phy_db_entry calib_ch;
92 struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
93 struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
94
95 u32 channel_num;
96 u32 channel_size;
97
98 /* for an access to the logger */
99 struct device *dev;
100};
101
102enum iwl_phy_db_section_type {
103 IWL_PHY_DB_CFG = 1,
104 IWL_PHY_DB_CALIB_NCH,
105 IWL_PHY_DB_CALIB_CH,
106 IWL_PHY_DB_CALIB_CHG_PAPD,
107 IWL_PHY_DB_CALIB_CHG_TXP,
108 IWL_PHY_DB_MAX
109};
110
111/* for parsing of tx power channel group data that comes from the firmware*/
112struct iwl_phy_db_chg_txp {
113 __le32 space;
114 __le16 max_channel_idx;
115} __packed;
116
117struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
118
119void iwl_phy_db_free(struct iwl_phy_db *phy_db);
120
121int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
122 enum iwl_phy_db_section_type type, u8 *data,
123 u16 size, gfp_t alloc_ctx);
124
125int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
126 enum iwl_phy_db_section_type type, u8 **data,
127 u16 *size, u16 ch_id);
128
129#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 3b1069290fa9..dfd54662e3e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -224,6 +224,7 @@
224#define SCD_TXFACT (SCD_BASE + 0x10) 224#define SCD_TXFACT (SCD_BASE + 0x10)
225#define SCD_ACTIVE (SCD_BASE + 0x14) 225#define SCD_ACTIVE (SCD_BASE + 0x14)
226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) 226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
227#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
227#define SCD_AGGR_SEL (SCD_BASE + 0x248) 228#define SCD_AGGR_SEL (SCD_BASE + 0x248)
228#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) 229#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
229 230
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 6213c05a4b52..e959207c630a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -347,7 +347,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
347void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo, 347void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
348 int sta_id, int tid, int frame_limit, u16 ssn); 348 int sta_id, int tid, int frame_limit, u16 ssn);
349void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 349void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
350 int index, enum dma_data_direction dma_dir); 350 enum dma_data_direction dma_dir);
351int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 351int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
352 struct sk_buff_head *skbs); 352 struct sk_buff_head *skbs);
353int iwl_queue_space(const struct iwl_queue *q); 353int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 21a8a672fbb2..a8750238ee09 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -204,33 +204,39 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
204 for (i = 1; i < num_tbs; i++) 204 for (i = 1; i < num_tbs; i++)
205 dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i), 205 dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
206 iwl_tfd_tb_get_len(tfd, i), dma_dir); 206 iwl_tfd_tb_get_len(tfd, i), dma_dir);
207
208 tfd->num_tbs = 0;
207} 209}
208 210
209/** 211/**
210 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 212 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
211 * @trans - transport private data 213 * @trans - transport private data
212 * @txq - tx queue 214 * @txq - tx queue
213 * @index - the index of the TFD to be freed 215 * @dma_dir - the direction of the DMA mapping
214 *@dma_dir - the direction of the DMA mapping
215 * 216 *
216 * Does NOT advance any TFD circular buffer read/write indexes 217 * Does NOT advance any TFD circular buffer read/write indexes
217 * Does NOT free the TFD itself (which is within circular buffer) 218 * Does NOT free the TFD itself (which is within circular buffer)
218 */ 219 */
219void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 220void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
220 int index, enum dma_data_direction dma_dir) 221 enum dma_data_direction dma_dir)
221{ 222{
222 struct iwl_tfd *tfd_tmp = txq->tfds; 223 struct iwl_tfd *tfd_tmp = txq->tfds;
223 224
225 /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
226 int rd_ptr = txq->q.read_ptr;
227 int idx = get_cmd_index(&txq->q, rd_ptr);
228
224 lockdep_assert_held(&txq->lock); 229 lockdep_assert_held(&txq->lock);
225 230
226 iwlagn_unmap_tfd(trans, &txq->entries[index].meta, 231 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
227 &tfd_tmp[index], dma_dir); 232 iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
233 &tfd_tmp[rd_ptr], dma_dir);
228 234
229 /* free SKB */ 235 /* free SKB */
230 if (txq->entries) { 236 if (txq->entries) {
231 struct sk_buff *skb; 237 struct sk_buff *skb;
232 238
233 skb = txq->entries[index].skb; 239 skb = txq->entries[idx].skb;
234 240
235 /* Can be called from irqs-disabled context 241 /* Can be called from irqs-disabled context
236 * If skb is not NULL, it means that the whole queue is being 242 * If skb is not NULL, it means that the whole queue is being
@@ -238,7 +244,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
238 */ 244 */
239 if (skb) { 245 if (skb) {
240 iwl_op_mode_free_skb(trans->op_mode, skb); 246 iwl_op_mode_free_skb(trans->op_mode, skb);
241 txq->entries[index].skb = NULL; 247 txq->entries[idx].skb = NULL;
242 } 248 }
243 } 249 }
244} 250}
@@ -973,7 +979,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
973 979
974 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 980 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
975 981
976 iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE); 982 iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
977 freed++; 983 freed++;
978 } 984 }
979 985
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 2e57161854b9..79c6b91417f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -435,9 +435,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
435 435
436 spin_lock_bh(&txq->lock); 436 spin_lock_bh(&txq->lock);
437 while (q->write_ptr != q->read_ptr) { 437 while (q->write_ptr != q->read_ptr) {
438 /* The read_ptr needs to bound by q->n_window */ 438 iwlagn_txq_free_tfd(trans, txq, dma_dir);
439 iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
440 dma_dir);
441 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 439 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
442 } 440 }
443 spin_unlock_bh(&txq->lock); 441 spin_unlock_bh(&txq->lock);
@@ -1060,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
1060 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 1058 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
1061 trans_pcie->scd_bc_tbls.dma >> 10); 1059 trans_pcie->scd_bc_tbls.dma >> 10);
1062 1060
1061 /* The chain extension of the SCD doesn't work well. This feature is
1062 * enabled by default by the HW, so we need to disable it manually.
1063 */
1064 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1065
1063 /* Enable DMA channel */ 1066 /* Enable DMA channel */
1064 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 1067 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1065 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 1068 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index fb787df01666..a0b7cfd34685 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1555 hdr = (struct ieee80211_hdr *) skb->data; 1555 hdr = (struct ieee80211_hdr *) skb->data;
1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); 1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
1557 } 1557 }
1558 txi->flags |= IEEE80211_TX_STAT_ACK;
1558 } 1559 }
1559 ieee80211_tx_status_irqsafe(data2->hw, skb); 1560 ieee80211_tx_status_irqsafe(data2->hw, skb);
1560 return 0; 1561 return 0;
@@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void)
1721 "unregister family %i\n", ret); 1722 "unregister family %i\n", ret);
1722} 1723}
1723 1724
1725static const struct ieee80211_iface_limit hwsim_if_limits[] = {
1726 { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
1727 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
1728 BIT(NL80211_IFTYPE_P2P_CLIENT) |
1729#ifdef CONFIG_MAC80211_MESH
1730 BIT(NL80211_IFTYPE_MESH_POINT) |
1731#endif
1732 BIT(NL80211_IFTYPE_AP) |
1733 BIT(NL80211_IFTYPE_P2P_GO) },
1734};
1735
1736static const struct ieee80211_iface_combination hwsim_if_comb = {
1737 .limits = hwsim_if_limits,
1738 .n_limits = ARRAY_SIZE(hwsim_if_limits),
1739 .max_interfaces = 2048,
1740 .num_different_channels = 1,
1741};
1742
1724static int __init init_mac80211_hwsim(void) 1743static int __init init_mac80211_hwsim(void)
1725{ 1744{
1726 int i, err = 0; 1745 int i, err = 0;
@@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void)
1782 hw->wiphy->n_addresses = 2; 1801 hw->wiphy->n_addresses = 2;
1783 hw->wiphy->addresses = data->addresses; 1802 hw->wiphy->addresses = data->addresses;
1784 1803
1804 hw->wiphy->iface_combinations = &hwsim_if_comb;
1805 hw->wiphy->n_iface_combinations = 1;
1806
1785 if (fake_hw_scan) { 1807 if (fake_hw_scan) {
1786 hw->wiphy->max_scan_ssids = 255; 1808 hw->wiphy->max_scan_ssids = 255;
1787 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; 1809 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 87671446e24b..015fec3371a0 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -948,6 +948,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
948 bss_cfg->ssid.ssid_len = params->ssid_len; 948 bss_cfg->ssid.ssid_len = params->ssid_len;
949 } 949 }
950 950
951 switch (params->hidden_ssid) {
952 case NL80211_HIDDEN_SSID_NOT_IN_USE:
953 bss_cfg->bcast_ssid_ctl = 1;
954 break;
955 case NL80211_HIDDEN_SSID_ZERO_LEN:
956 bss_cfg->bcast_ssid_ctl = 0;
957 break;
958 case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
959 /* firmware doesn't support this type of hidden SSID */
960 default:
961 return -EINVAL;
962 }
963
951 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 964 if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
952 kfree(bss_cfg); 965 kfree(bss_cfg);
953 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); 966 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 9f674bbebe65..561452a5c818 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
122#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) 122#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
123#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) 123#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
125#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) 126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
126#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) 127#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
127#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) 128#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid {
1209 u8 ssid[0]; 1210 u8 ssid[0];
1210} __packed; 1211} __packed;
1211 1212
1213struct host_cmd_tlv_bcast_ssid {
1214 struct host_cmd_tlv tlv;
1215 u8 bcast_ctl;
1216} __packed;
1217
1212struct host_cmd_tlv_beacon_period { 1218struct host_cmd_tlv_beacon_period {
1213 struct host_cmd_tlv tlv; 1219 struct host_cmd_tlv tlv;
1214 __le16 period; 1220 __le16 period;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 76dfbc42a732..8173ab66066d 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -132,6 +132,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
132 struct host_cmd_tlv_dtim_period *dtim_period; 132 struct host_cmd_tlv_dtim_period *dtim_period;
133 struct host_cmd_tlv_beacon_period *beacon_period; 133 struct host_cmd_tlv_beacon_period *beacon_period;
134 struct host_cmd_tlv_ssid *ssid; 134 struct host_cmd_tlv_ssid *ssid;
135 struct host_cmd_tlv_bcast_ssid *bcast_ssid;
135 struct host_cmd_tlv_channel_band *chan_band; 136 struct host_cmd_tlv_channel_band *chan_band;
136 struct host_cmd_tlv_frag_threshold *frag_threshold; 137 struct host_cmd_tlv_frag_threshold *frag_threshold;
137 struct host_cmd_tlv_rts_threshold *rts_threshold; 138 struct host_cmd_tlv_rts_threshold *rts_threshold;
@@ -153,6 +154,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
153 cmd_size += sizeof(struct host_cmd_tlv) + 154 cmd_size += sizeof(struct host_cmd_tlv) +
154 bss_cfg->ssid.ssid_len; 155 bss_cfg->ssid.ssid_len;
155 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; 156 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
157
158 bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
159 bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
160 bcast_ssid->tlv.len =
161 cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
162 bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
163 cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
164 tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
156 } 165 }
157 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { 166 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
158 chan_band = (struct host_cmd_tlv_channel_band *)tlv; 167 chan_band = (struct host_cmd_tlv_channel_band *)tlv;
@@ -416,6 +425,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
416 if (!bss_cfg) 425 if (!bss_cfg)
417 return -ENOMEM; 426 return -ENOMEM;
418 427
428 mwifiex_set_sys_config_invalid_data(bss_cfg);
419 bss_cfg->band_cfg = BAND_CONFIG_MANUAL; 429 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
420 bss_cfg->channel = channel; 430 bss_cfg->channel = channel;
421 431
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ca36cccaba31..8f754025b06e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -396,8 +396,7 @@ struct rt2x00_intf {
396 * for hardware which doesn't support hardware 396 * for hardware which doesn't support hardware
397 * sequence counting. 397 * sequence counting.
398 */ 398 */
399 spinlock_t seqlock; 399 atomic_t seqno;
400 u16 seqno;
401}; 400};
402 401
403static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) 402static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index b49773ef72f2..dd24b2663b5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
277 else 277 else
278 rt2x00dev->intf_sta_count++; 278 rt2x00dev->intf_sta_count++;
279 279
280 spin_lock_init(&intf->seqlock);
281 mutex_init(&intf->beacon_skb_mutex); 280 mutex_init(&intf->beacon_skb_mutex);
282 intf->beacon = entry; 281 intf->beacon = entry;
283 282
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4c662eccf53c..2fd830103415 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
207 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 207 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
209 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); 209 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
210 u16 seqno;
210 211
211 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) 212 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
212 return; 213 return;
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
238 * sequence counting per-frame, since those will override the 239 * sequence counting per-frame, since those will override the
239 * sequence counter given by mac80211. 240 * sequence counter given by mac80211.
240 */ 241 */
241 spin_lock(&intf->seqlock);
242
243 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 242 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
244 intf->seqno += 0x10; 243 seqno = atomic_add_return(0x10, &intf->seqno);
245 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 244 else
246 hdr->seq_ctrl |= cpu_to_le16(intf->seqno); 245 seqno = atomic_read(&intf->seqno);
247
248 spin_unlock(&intf->seqlock);
249 246
247 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
248 hdr->seq_ctrl |= cpu_to_le16(seqno);
250} 249}
251 250
252static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, 251static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 2e0de2f5f0f9..c2d5b495c179 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
117 radio_on = true; 117 radio_on = true;
118 } else if (radio_on) { 118 } else if (radio_on) {
119 radio_on = false; 119 radio_on = false;
120 cancel_delayed_work_sync(&priv->led_on); 120 cancel_delayed_work(&priv->led_on);
121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0); 121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
122 } 122 }
123 } else if (radio_on) { 123 } else if (radio_on) {
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 1b851f650e07..e2750a12c6f1 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
260 } 260 }
261 261
262 if (wl->irq) { 262 if (wl->irq) {
263 irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
263 ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl); 264 ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
264 if (ret < 0) { 265 if (ret < 0) {
265 wl1251_error("request_irq() failed: %d", ret); 266 wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
267 } 268 }
268 269
269 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 270 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
270 disable_irq(wl->irq);
271 271
272 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; 272 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
273 wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq; 273 wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c354fc5c..87f6305bda2c 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -281,6 +281,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
281 281
282 wl->use_eeprom = pdata->use_eeprom; 282 wl->use_eeprom = pdata->use_eeprom;
283 283
284 irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
284 ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl); 285 ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
285 if (ret < 0) { 286 if (ret < 0) {
286 wl1251_error("request_irq() failed: %d", ret); 287 wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +290,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
289 290
290 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 291 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
291 292
292 disable_irq(wl->irq);
293
294 ret = wl1251_init_ieee80211(wl); 293 ret = wl1251_init_ieee80211(wl);
295 if (ret) 294 if (ret)
296 goto out_irq; 295 goto out_irq;
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 509aa881d790..f3d6fa508269 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1715,6 +1715,7 @@ out:
1715 1715
1716} 1716}
1717 1717
1718#ifdef CONFIG_PM
1718/* Set the global behaviour of RX filters - On/Off + default action */ 1719/* Set the global behaviour of RX filters - On/Off + default action */
1719int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, 1720int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
1720 enum rx_filter_action action) 1721 enum rx_filter_action action)
@@ -1794,3 +1795,4 @@ out:
1794 kfree(acx); 1795 kfree(acx);
1795 return ret; 1796 return ret;
1796} 1797}
1798#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 8106b2ebfe60..e6a74869a5ff 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1330,9 +1330,11 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
1330int wl1271_acx_fm_coex(struct wl1271 *wl); 1330int wl1271_acx_fm_coex(struct wl1271 *wl);
1331int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl); 1331int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
1332int wl12xx_acx_config_hangover(struct wl1271 *wl); 1332int wl12xx_acx_config_hangover(struct wl1271 *wl);
1333
1334#ifdef CONFIG_PM
1333int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, 1335int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
1334 enum rx_filter_action action); 1336 enum rx_filter_action action);
1335int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable, 1337int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
1336 struct wl12xx_rx_filter *filter); 1338 struct wl12xx_rx_filter *filter);
1337 1339#endif /* CONFIG_PM */
1338#endif /* __WL1271_ACX_H__ */ 1340#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 1f1d9488dfb6..d6a3c6b07827 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -279,6 +279,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
279 wl12xx_rearm_rx_streaming(wl, active_hlids); 279 wl12xx_rearm_rx_streaming(wl, active_hlids);
280} 280}
281 281
282#ifdef CONFIG_PM
282int wl1271_rx_filter_enable(struct wl1271 *wl, 283int wl1271_rx_filter_enable(struct wl1271 *wl,
283 int index, bool enable, 284 int index, bool enable,
284 struct wl12xx_rx_filter *filter) 285 struct wl12xx_rx_filter *filter)
@@ -314,3 +315,4 @@ void wl1271_rx_filter_clear_all(struct wl1271 *wl)
314 wl1271_rx_filter_enable(wl, i, 0, NULL); 315 wl1271_rx_filter_enable(wl, i, 0, NULL);
315 } 316 }
316} 317}
318#endif /* CONFIG_PM */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2596401308a8..f4a6fcaeffb1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -325,8 +325,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
325 unsigned int count; 325 unsigned int count;
326 int i, copy_off; 326 int i, copy_off;
327 327
328 count = DIV_ROUND_UP( 328 count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
329 offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
330 329
331 copy_off = skb_headlen(skb) % PAGE_SIZE; 330 copy_off = skb_headlen(skb) % PAGE_SIZE;
332 331
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index 46f4a9f9f5e4..281f18c2fb82 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -232,7 +232,7 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
232 232
233static int check_crc(u8 *buf, int buflen) 233static int check_crc(u8 *buf, int buflen)
234{ 234{
235 u8 len; 235 int len;
236 u16 crc; 236 u16 crc;
237 237
238 len = buf[0] + 1; 238 len = buf[0] + 1;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 447e83472c01..77cb54a65cde 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1744,6 +1744,11 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1744 if (target_state == PCI_POWER_ERROR) 1744 if (target_state == PCI_POWER_ERROR)
1745 return -EIO; 1745 return -EIO;
1746 1746
1747 /* Some devices mustn't be in D3 during system sleep */
1748 if (target_state == PCI_D3hot &&
1749 (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
1750 return 0;
1751
1747 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); 1752 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1748 1753
1749 error = pci_set_power_state(dev, target_state); 1754 error = pci_set_power_state(dev, target_state);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2a7521677541..194b243a2817 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2929,6 +2929,32 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
2929DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); 2929DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
2930DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); 2930DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
2931 2931
2932/*
2933 * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
2934 * ASUS motherboards will cause memory corruption or a system crash
2935 * if they are in D3 while the system is put into S3 sleep.
2936 */
2937static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
2938{
2939 const char *sys_info;
2940 static const char good_Asus_board[] = "P8Z68-V";
2941
2942 if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
2943 return;
2944 if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
2945 return;
2946 sys_info = dmi_get_system_info(DMI_BOARD_NAME);
2947 if (sys_info && memcmp(sys_info, good_Asus_board,
2948 sizeof(good_Asus_board) - 1) == 0)
2949 return;
2950
2951 dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
2952 dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
2953 device_set_wakeup_capable(&dev->dev, false);
2954}
2955DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
2956DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
2957
2932static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 2958static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
2933 struct pci_fixup *end) 2959 struct pci_fixup *end)
2934{ 2960{
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index c3b331b74fa0..0cc053af70bd 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps);
61 list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ 61 list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
62 for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ 62 for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
63 _i_ < _maps_node_->num_maps; \ 63 _i_ < _maps_node_->num_maps; \
64 i++, _map_ = &_maps_node_->maps[_i_]) 64 _i_++, _map_ = &_maps_node_->maps[_i_])
65 65
66/** 66/**
67 * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support 67 * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index f6e7c670906c..dd6d93aa5334 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -27,16 +27,16 @@
27#include "core.h" 27#include "core.h"
28#include "pinctrl-imx.h" 28#include "pinctrl-imx.h"
29 29
30#define IMX_PMX_DUMP(info, p, m, c, n) \ 30#define IMX_PMX_DUMP(info, p, m, c, n) \
31{ \ 31{ \
32 int i, j; \ 32 int i, j; \
33 printk("Format: Pin Mux Config\n"); \ 33 printk(KERN_DEBUG "Format: Pin Mux Config\n"); \
34 for (i = 0; i < n; i++) { \ 34 for (i = 0; i < n; i++) { \
35 j = p[i]; \ 35 j = p[i]; \
36 printk("%s %d 0x%lx\n", \ 36 printk(KERN_DEBUG "%s %d 0x%lx\n", \
37 info->pins[j].name, \ 37 info->pins[j].name, \
38 m[i], c[i]); \ 38 m[i], c[i]); \
39 } \ 39 } \
40} 40}
41 41
42/* The bits in CONFIG cell defined in binding doc*/ 42/* The bits in CONFIG cell defined in binding doc*/
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
173 173
174 /* create mux map */ 174 /* create mux map */
175 parent = of_get_parent(np); 175 parent = of_get_parent(np);
176 if (!parent) 176 if (!parent) {
177 kfree(new_map);
177 return -EINVAL; 178 return -EINVAL;
179 }
178 new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; 180 new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
179 new_map[0].data.mux.function = parent->name; 181 new_map[0].data.mux.function = parent->name;
180 new_map[0].data.mux.group = np->name; 182 new_map[0].data.mux.group = np->name;
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
193 } 195 }
194 196
195 dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", 197 dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
196 new_map->data.mux.function, new_map->data.mux.group, map_num); 198 (*map)->data.mux.function, (*map)->data.mux.group, map_num);
197 199
198 return 0; 200 return 0;
199} 201}
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
201static void imx_dt_free_map(struct pinctrl_dev *pctldev, 203static void imx_dt_free_map(struct pinctrl_dev *pctldev,
202 struct pinctrl_map *map, unsigned num_maps) 204 struct pinctrl_map *map, unsigned num_maps)
203{ 205{
204 int i; 206 kfree(map);
205
206 for (i = 0; i < num_maps; i++)
207 kfree(map);
208} 207}
209 208
210static struct pinctrl_ops imx_pctrl_ops = { 209static struct pinctrl_ops imx_pctrl_ops = {
@@ -475,9 +474,8 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
475 grp->configs[j] = config & ~IMX_PAD_SION; 474 grp->configs[j] = config & ~IMX_PAD_SION;
476 } 475 }
477 476
478#ifdef DEBUG
479 IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); 477 IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
480#endif 478
481 return 0; 479 return 0;
482} 480}
483 481
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index 556e45a213eb..4ba4636b6a4a 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
107 107
108 /* Compose group name */ 108 /* Compose group name */
109 group = kzalloc(length, GFP_KERNEL); 109 group = kzalloc(length, GFP_KERNEL);
110 if (!group) 110 if (!group) {
111 return -ENOMEM; 111 ret = -ENOMEM;
112 goto free;
113 }
112 snprintf(group, length, "%s.%d", np->name, reg); 114 snprintf(group, length, "%s.%d", np->name, reg);
113 new_map[i].data.mux.group = group; 115 new_map[i].data.mux.group = group;
114 i++; 116 i++;
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
118 pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); 120 pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
119 if (!pconfig) { 121 if (!pconfig) {
120 ret = -ENOMEM; 122 ret = -ENOMEM;
121 goto free; 123 goto free_group;
122 } 124 }
123 125
124 new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; 126 new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
133 135
134 return 0; 136 return 0;
135 137
138free_group:
139 if (!purecfg)
140 kfree(group);
136free: 141free:
137 kfree(new_map); 142 kfree(new_map);
138 return ret; 143 return ret;
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
511 return 0; 516 return 0;
512 517
513err: 518err:
519 platform_set_drvdata(pdev, NULL);
514 iounmap(d->base); 520 iounmap(d->base);
515 return ret; 521 return ret;
516} 522}
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev)
520{ 526{
521 struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); 527 struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
522 528
529 platform_set_drvdata(pdev, NULL);
523 pinctrl_unregister(d->pctl); 530 pinctrl_unregister(d->pctl);
524 iounmap(d->base); 531 iounmap(d->base);
525 532
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index b8e01c3eaa95..3e7e47d6b385 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/irqdomain.h> 25#include <linux/irqdomain.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/of_device.h>
27#include <linux/pinctrl/pinctrl.h> 28#include <linux/pinctrl/pinctrl.h>
28#include <linux/pinctrl/pinmux.h> 29#include <linux/pinctrl/pinmux.h>
29#include <linux/pinctrl/pinconf.h> 30#include <linux/pinctrl/pinconf.h>
@@ -672,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
672 * wakeup is anyhow controlled by the RIMSC and FIMSC registers. 673 * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
673 */ 674 */
674 if (nmk_chip->sleepmode && on) { 675 if (nmk_chip->sleepmode && on) {
675 __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base, 676 __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
676 NMK_GPIO_SLPM_WAKEUP_ENABLE); 677 NMK_GPIO_SLPM_WAKEUP_ENABLE);
677 } 678 }
678 679
@@ -1245,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1245 ret = PTR_ERR(clk); 1246 ret = PTR_ERR(clk);
1246 goto out_unmap; 1247 goto out_unmap;
1247 } 1248 }
1249 clk_prepare(clk);
1248 1250
1249 nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); 1251 nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
1250 if (!nmk_chip) { 1252 if (!nmk_chip) {
@@ -1436,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
1436 1438
1437 dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins); 1439 dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
1438 1440
1439 /* Handle this special glitch on altfunction C */ 1441 /*
1442 * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
1443 * we may pass through an undesired state. In this case we take
1444 * some extra care.
1445 *
1446 * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
1447 * - Save SLPM registers (since we have a shadow register in the
1448 * nmk_chip we're using that as backup)
1449 * - Set SLPM=0 for the IOs you want to switch and others to 1
1450 * - Configure the GPIO registers for the IOs that are being switched
1451 * - Set IOFORCE=1
1452 * - Modify the AFLSA/B registers for the IOs that are being switched
1453 * - Set IOFORCE=0
1454 * - Restore SLPM registers
1455 * - Any spurious wake up event during switch sequence to be ignored
1456 * and cleared
1457 *
1458 * We REALLY need to save ALL slpm registers, because the external
1459 * IOFORCE will switch *all* ports to their sleepmode setting to as
1460 * to avoid glitches. (Not just one port!)
1461 */
1440 glitch = (g->altsetting == NMK_GPIO_ALT_C); 1462 glitch = (g->altsetting == NMK_GPIO_ALT_C);
1441 1463
1442 if (glitch) { 1464 if (glitch) {
@@ -1688,18 +1710,34 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
1688 .owner = THIS_MODULE, 1710 .owner = THIS_MODULE,
1689}; 1711};
1690 1712
1713static const struct of_device_id nmk_pinctrl_match[] = {
1714 {
1715 .compatible = "stericsson,nmk_pinctrl",
1716 .data = (void *)PINCTRL_NMK_DB8500,
1717 },
1718 {},
1719};
1720
1691static int __devinit nmk_pinctrl_probe(struct platform_device *pdev) 1721static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
1692{ 1722{
1693 const struct platform_device_id *platid = platform_get_device_id(pdev); 1723 const struct platform_device_id *platid = platform_get_device_id(pdev);
1724 struct device_node *np = pdev->dev.of_node;
1694 struct nmk_pinctrl *npct; 1725 struct nmk_pinctrl *npct;
1726 unsigned int version = 0;
1695 int i; 1727 int i;
1696 1728
1697 npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL); 1729 npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
1698 if (!npct) 1730 if (!npct)
1699 return -ENOMEM; 1731 return -ENOMEM;
1700 1732
1733 if (platid)
1734 version = platid->driver_data;
1735 else if (np)
1736 version = (unsigned int)
1737 of_match_device(nmk_pinctrl_match, &pdev->dev)->data;
1738
1701 /* Poke in other ASIC variants here */ 1739 /* Poke in other ASIC variants here */
1702 if (platid->driver_data == PINCTRL_NMK_DB8500) 1740 if (version == PINCTRL_NMK_DB8500)
1703 nmk_pinctrl_db8500_init(&npct->soc); 1741 nmk_pinctrl_db8500_init(&npct->soc);
1704 1742
1705 /* 1743 /*
@@ -1758,6 +1796,7 @@ static struct platform_driver nmk_pinctrl_driver = {
1758 .driver = { 1796 .driver = {
1759 .owner = THIS_MODULE, 1797 .owner = THIS_MODULE,
1760 .name = "pinctrl-nomadik", 1798 .name = "pinctrl-nomadik",
1799 .of_match_table = nmk_pinctrl_match,
1761 }, 1800 },
1762 .probe = nmk_pinctrl_probe, 1801 .probe = nmk_pinctrl_probe,
1763 .id_table = nmk_pinctrl_id, 1802 .id_table = nmk_pinctrl_id,
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index ba15b1a29e52..e9f8e7d11001 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1184,7 +1184,7 @@ out_no_gpio_remap:
1184 return ret; 1184 return ret;
1185} 1185}
1186 1186
1187static const struct of_device_id pinmux_ids[] = { 1187static const struct of_device_id pinmux_ids[] __devinitconst = {
1188 { .compatible = "sirf,prima2-gpio-pinmux" }, 1188 { .compatible = "sirf,prima2-gpio-pinmux" },
1189 {} 1189 {}
1190}; 1190};
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 5ae50aadf885..b3f6b2873fdd 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr pinmux 2 * Driver for the ST Microelectronics SPEAr pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * Inspired from: 7 * Inspired from:
8 * - U300 Pinctl drivers 8 * - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 9155783bb47f..d950eb78d939 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
2 * Driver header file for the ST Microelectronics SPEAr pinmux 2 * Driver header file for the ST Microelectronics SPEAr pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index fff168be7f00..d6cca8c81b92 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr1310 pinmux 2 * Driver for the ST Microelectronics SPEAr1310 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -2192,7 +2192,7 @@ static void __exit spear1310_pinctrl_exit(void)
2192} 2192}
2193module_exit(spear1310_pinctrl_exit); 2193module_exit(spear1310_pinctrl_exit);
2194 2194
2195MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 2195MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
2196MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver"); 2196MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
2197MODULE_LICENSE("GPL v2"); 2197MODULE_LICENSE("GPL v2");
2198MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match); 2198MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index a8ab2a6f51bf..a0eb057e55bd 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr1340 pinmux 2 * Driver for the ST Microelectronics SPEAr1340 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -1983,7 +1983,7 @@ static void __exit spear1340_pinctrl_exit(void)
1983} 1983}
1984module_exit(spear1340_pinctrl_exit); 1984module_exit(spear1340_pinctrl_exit);
1985 1985
1986MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 1986MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
1987MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); 1987MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
1988MODULE_LICENSE("GPL v2"); 1988MODULE_LICENSE("GPL v2");
1989MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match); 1989MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 9c82a35e4e78..4dfc2849b172 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr300 pinmux 2 * Driver for the ST Microelectronics SPEAr300 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void)
702} 702}
703module_exit(spear300_pinctrl_exit); 703module_exit(spear300_pinctrl_exit);
704 704
705MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 705MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
706MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver"); 706MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
707MODULE_LICENSE("GPL v2"); 707MODULE_LICENSE("GPL v2");
708MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match); 708MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 1a9707605125..96883693fb7e 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr310 pinmux 2 * Driver for the ST Microelectronics SPEAr310 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void)
425} 425}
426module_exit(spear310_pinctrl_exit); 426module_exit(spear310_pinctrl_exit);
427 427
428MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 428MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
429MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver"); 429MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
430MODULE_LICENSE("GPL v2"); 430MODULE_LICENSE("GPL v2");
431MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match); 431MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index de726e6c283a..020b1e0bdb3e 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr320 pinmux 2 * Driver for the ST Microelectronics SPEAr320 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void)
3462} 3462}
3463module_exit(spear320_pinctrl_exit); 3463module_exit(spear320_pinctrl_exit);
3464 3464
3465MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 3465MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
3466MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); 3466MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
3467MODULE_LICENSE("GPL v2"); 3467MODULE_LICENSE("GPL v2");
3468MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match); 3468MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 91c883bc46a6..0242378f7cb8 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr3xx pinmux 2 * Driver for the ST Microelectronics SPEAr3xx pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 5d5fdd8df7b8..31f44347f17c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
2 * Header file for the ST Microelectronics SPEAr3xx pinmux 2 * Header file for the ST Microelectronics SPEAr3xx pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c1a3fd8e1243..ce875dc365e5 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -523,6 +523,30 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
523 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"), 523 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
524 }, 524 },
525 }, 525 },
526 {
527 .callback = video_set_backlight_video_vendor,
528 .ident = "Acer Extensa 5235",
529 .matches = {
530 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
531 DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
532 },
533 },
534 {
535 .callback = video_set_backlight_video_vendor,
536 .ident = "Acer TravelMate 5760",
537 .matches = {
538 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
539 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
540 },
541 },
542 {
543 .callback = video_set_backlight_video_vendor,
544 .ident = "Acer Aspire 5750",
545 .matches = {
546 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
547 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
548 },
549 },
526 {} 550 {}
527}; 551};
528 552
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 639db4d0aa76..2fd9d36acd15 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * (C) 2009 - Peter Feuerer peter (a) piie.net 6 * (C) 2009 - Peter Feuerer peter (a) piie.net
7 * http://piie.net 7 * http://piie.net
8 * 2009 Borislav Petkov <petkovbb@gmail.com> 8 * 2009 Borislav Petkov bp (a) alien8.de
9 * 9 *
10 * Inspired by and many thanks to: 10 * Inspired by and many thanks to:
11 * o acerfand - Rachel Greenham 11 * o acerfand - Rachel Greenham
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 8a582bdfdc76..694a15a56230 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -87,6 +87,9 @@ static int gmux_update_status(struct backlight_device *bd)
87 struct apple_gmux_data *gmux_data = bl_get_data(bd); 87 struct apple_gmux_data *gmux_data = bl_get_data(bd);
88 u32 brightness = bd->props.brightness; 88 u32 brightness = bd->props.brightness;
89 89
90 if (bd->props.state & BL_CORE_SUSPENDED)
91 return 0;
92
90 /* 93 /*
91 * Older gmux versions require writing out lower bytes first then 94 * Older gmux versions require writing out lower bytes first then
92 * setting the upper byte to 0 to flush the values. Newer versions 95 * setting the upper byte to 0 to flush the values. Newer versions
@@ -102,6 +105,7 @@ static int gmux_update_status(struct backlight_device *bd)
102} 105}
103 106
104static const struct backlight_ops gmux_bl_ops = { 107static const struct backlight_ops gmux_bl_ops = {
108 .options = BL_CORE_SUSPENDRESUME,
105 .get_brightness = gmux_get_brightness, 109 .get_brightness = gmux_get_brightness,
106 .update_status = gmux_update_status, 110 .update_status = gmux_update_status,
107}; 111};
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index e6c08ee8d46c..5f78aac9b163 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -21,7 +21,6 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/dmi.h> 22#include <linux/dmi.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/rfkill.h>
25#include <linux/power_supply.h> 24#include <linux/power_supply.h>
26#include <linux/acpi.h> 25#include <linux/acpi.h>
27#include <linux/mm.h> 26#include <linux/mm.h>
@@ -90,11 +89,8 @@ static struct platform_driver platform_driver = {
90 89
91static struct platform_device *platform_device; 90static struct platform_device *platform_device;
92static struct backlight_device *dell_backlight_device; 91static struct backlight_device *dell_backlight_device;
93static struct rfkill *wifi_rfkill;
94static struct rfkill *bluetooth_rfkill;
95static struct rfkill *wwan_rfkill;
96 92
97static const struct dmi_system_id __initdata dell_device_table[] = { 93static const struct dmi_system_id dell_device_table[] __initconst = {
98 { 94 {
99 .ident = "Dell laptop", 95 .ident = "Dell laptop",
100 .matches = { 96 .matches = {
@@ -119,96 +115,94 @@ static const struct dmi_system_id __initdata dell_device_table[] = {
119}; 115};
120MODULE_DEVICE_TABLE(dmi, dell_device_table); 116MODULE_DEVICE_TABLE(dmi, dell_device_table);
121 117
122static struct dmi_system_id __devinitdata dell_blacklist[] = { 118static struct dmi_system_id __devinitdata dell_quirks[] = {
123 /* Supported by compal-laptop */
124 {
125 .ident = "Dell Mini 9",
126 .matches = {
127 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
128 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"),
129 },
130 },
131 { 119 {
132 .ident = "Dell Mini 10", 120 .callback = dmi_matched,
121 .ident = "Dell Vostro V130",
133 .matches = { 122 .matches = {
134 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 123 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
135 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"), 124 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
136 }, 125 },
126 .driver_data = &quirk_dell_vostro_v130,
137 }, 127 },
138 { 128 {
139 .ident = "Dell Mini 10v", 129 .callback = dmi_matched,
130 .ident = "Dell Vostro V131",
140 .matches = { 131 .matches = {
141 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 132 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
142 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"), 133 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
143 }, 134 },
135 .driver_data = &quirk_dell_vostro_v130,
144 }, 136 },
145 { 137 {
146 .ident = "Dell Mini 1012", 138 .callback = dmi_matched,
139 .ident = "Dell Vostro 3350",
147 .matches = { 140 .matches = {
148 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 141 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
149 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), 142 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
150 }, 143 },
144 .driver_data = &quirk_dell_vostro_v130,
151 }, 145 },
152 { 146 {
153 .ident = "Dell Inspiron 11z", 147 .callback = dmi_matched,
148 .ident = "Dell Vostro 3555",
154 .matches = { 149 .matches = {
155 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 150 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
156 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"), 151 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
157 }, 152 },
153 .driver_data = &quirk_dell_vostro_v130,
158 }, 154 },
159 { 155 {
160 .ident = "Dell Mini 12", 156 .callback = dmi_matched,
157 .ident = "Dell Inspiron N311z",
161 .matches = { 158 .matches = {
162 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 159 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
163 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"), 160 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
164 }, 161 },
162 .driver_data = &quirk_dell_vostro_v130,
165 }, 163 },
166 {}
167};
168
169static struct dmi_system_id __devinitdata dell_quirks[] = {
170 { 164 {
171 .callback = dmi_matched, 165 .callback = dmi_matched,
172 .ident = "Dell Vostro V130", 166 .ident = "Dell Inspiron M5110",
173 .matches = { 167 .matches = {
174 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 168 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
175 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"), 169 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
176 }, 170 },
177 .driver_data = &quirk_dell_vostro_v130, 171 .driver_data = &quirk_dell_vostro_v130,
178 }, 172 },
179 { 173 {
180 .callback = dmi_matched, 174 .callback = dmi_matched,
181 .ident = "Dell Vostro V131", 175 .ident = "Dell Vostro 3360",
182 .matches = { 176 .matches = {
183 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 177 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
184 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), 178 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
185 }, 179 },
186 .driver_data = &quirk_dell_vostro_v130, 180 .driver_data = &quirk_dell_vostro_v130,
187 }, 181 },
188 { 182 {
189 .callback = dmi_matched, 183 .callback = dmi_matched,
190 .ident = "Dell Vostro 3555", 184 .ident = "Dell Vostro 3460",
191 .matches = { 185 .matches = {
192 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 186 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
193 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"), 187 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
194 }, 188 },
195 .driver_data = &quirk_dell_vostro_v130, 189 .driver_data = &quirk_dell_vostro_v130,
196 }, 190 },
197 { 191 {
198 .callback = dmi_matched, 192 .callback = dmi_matched,
199 .ident = "Dell Inspiron N311z", 193 .ident = "Dell Vostro 3560",
200 .matches = { 194 .matches = {
201 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 195 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
202 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"), 196 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
203 }, 197 },
204 .driver_data = &quirk_dell_vostro_v130, 198 .driver_data = &quirk_dell_vostro_v130,
205 }, 199 },
206 { 200 {
207 .callback = dmi_matched, 201 .callback = dmi_matched,
208 .ident = "Dell Inspiron M5110", 202 .ident = "Dell Vostro 3450",
209 .matches = { 203 .matches = {
210 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 204 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
211 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"), 205 DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
212 }, 206 },
213 .driver_data = &quirk_dell_vostro_v130, 207 .driver_data = &quirk_dell_vostro_v130,
214 }, 208 },
@@ -305,94 +299,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
305 return buffer; 299 return buffer;
306} 300}
307 301
308/* Derived from information in DellWirelessCtl.cpp:
309 Class 17, select 11 is radio control. It returns an array of 32-bit values.
310
311 Input byte 0 = 0: Wireless information
312
313 result[0]: return code
314 result[1]:
315 Bit 0: Hardware switch supported
316 Bit 1: Wifi locator supported
317 Bit 2: Wifi is supported
318 Bit 3: Bluetooth is supported
319 Bit 4: WWAN is supported
320 Bit 5: Wireless keyboard supported
321 Bits 6-7: Reserved
322 Bit 8: Wifi is installed
323 Bit 9: Bluetooth is installed
324 Bit 10: WWAN is installed
325 Bits 11-15: Reserved
326 Bit 16: Hardware switch is on
327 Bit 17: Wifi is blocked
328 Bit 18: Bluetooth is blocked
329 Bit 19: WWAN is blocked
330 Bits 20-31: Reserved
331 result[2]: NVRAM size in bytes
332 result[3]: NVRAM format version number
333
334 Input byte 0 = 2: Wireless switch configuration
335 result[0]: return code
336 result[1]:
337 Bit 0: Wifi controlled by switch
338 Bit 1: Bluetooth controlled by switch
339 Bit 2: WWAN controlled by switch
340 Bits 3-6: Reserved
341 Bit 7: Wireless switch config locked
342 Bit 8: Wifi locator enabled
343 Bits 9-14: Reserved
344 Bit 15: Wifi locator setting locked
345 Bits 16-31: Reserved
346*/
347
348static int dell_rfkill_set(void *data, bool blocked)
349{
350 int disable = blocked ? 1 : 0;
351 unsigned long radio = (unsigned long)data;
352 int hwswitch_bit = (unsigned long)data - 1;
353 int ret = 0;
354
355 get_buffer();
356 dell_send_request(buffer, 17, 11);
357
358 /* If the hardware switch controls this radio, and the hardware
359 switch is disabled, don't allow changing the software state */
360 if ((hwswitch_state & BIT(hwswitch_bit)) &&
361 !(buffer->output[1] & BIT(16))) {
362 ret = -EINVAL;
363 goto out;
364 }
365
366 buffer->input[0] = (1 | (radio<<8) | (disable << 16));
367 dell_send_request(buffer, 17, 11);
368
369out:
370 release_buffer();
371 return ret;
372}
373
374static void dell_rfkill_query(struct rfkill *rfkill, void *data)
375{
376 int status;
377 int bit = (unsigned long)data + 16;
378 int hwswitch_bit = (unsigned long)data - 1;
379
380 get_buffer();
381 dell_send_request(buffer, 17, 11);
382 status = buffer->output[1];
383 release_buffer();
384
385 rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
386
387 if (hwswitch_state & (BIT(hwswitch_bit)))
388 rfkill_set_hw_state(rfkill, !(status & BIT(16)));
389}
390
391static const struct rfkill_ops dell_rfkill_ops = {
392 .set_block = dell_rfkill_set,
393 .query = dell_rfkill_query,
394};
395
396static struct dentry *dell_laptop_dir; 302static struct dentry *dell_laptop_dir;
397 303
398static int dell_debugfs_show(struct seq_file *s, void *data) 304static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -462,108 +368,6 @@ static const struct file_operations dell_debugfs_fops = {
462 .release = single_release, 368 .release = single_release,
463}; 369};
464 370
465static void dell_update_rfkill(struct work_struct *ignored)
466{
467 if (wifi_rfkill)
468 dell_rfkill_query(wifi_rfkill, (void *)1);
469 if (bluetooth_rfkill)
470 dell_rfkill_query(bluetooth_rfkill, (void *)2);
471 if (wwan_rfkill)
472 dell_rfkill_query(wwan_rfkill, (void *)3);
473}
474static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
475
476
477static int __init dell_setup_rfkill(void)
478{
479 int status;
480 int ret;
481
482 if (dmi_check_system(dell_blacklist)) {
483 pr_info("Blacklisted hardware detected - not enabling rfkill\n");
484 return 0;
485 }
486
487 get_buffer();
488 dell_send_request(buffer, 17, 11);
489 status = buffer->output[1];
490 buffer->input[0] = 0x2;
491 dell_send_request(buffer, 17, 11);
492 hwswitch_state = buffer->output[1];
493 release_buffer();
494
495 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
496 wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
497 RFKILL_TYPE_WLAN,
498 &dell_rfkill_ops, (void *) 1);
499 if (!wifi_rfkill) {
500 ret = -ENOMEM;
501 goto err_wifi;
502 }
503 ret = rfkill_register(wifi_rfkill);
504 if (ret)
505 goto err_wifi;
506 }
507
508 if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
509 bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
510 &platform_device->dev,
511 RFKILL_TYPE_BLUETOOTH,
512 &dell_rfkill_ops, (void *) 2);
513 if (!bluetooth_rfkill) {
514 ret = -ENOMEM;
515 goto err_bluetooth;
516 }
517 ret = rfkill_register(bluetooth_rfkill);
518 if (ret)
519 goto err_bluetooth;
520 }
521
522 if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
523 wwan_rfkill = rfkill_alloc("dell-wwan",
524 &platform_device->dev,
525 RFKILL_TYPE_WWAN,
526 &dell_rfkill_ops, (void *) 3);
527 if (!wwan_rfkill) {
528 ret = -ENOMEM;
529 goto err_wwan;
530 }
531 ret = rfkill_register(wwan_rfkill);
532 if (ret)
533 goto err_wwan;
534 }
535
536 return 0;
537err_wwan:
538 rfkill_destroy(wwan_rfkill);
539 if (bluetooth_rfkill)
540 rfkill_unregister(bluetooth_rfkill);
541err_bluetooth:
542 rfkill_destroy(bluetooth_rfkill);
543 if (wifi_rfkill)
544 rfkill_unregister(wifi_rfkill);
545err_wifi:
546 rfkill_destroy(wifi_rfkill);
547
548 return ret;
549}
550
551static void dell_cleanup_rfkill(void)
552{
553 if (wifi_rfkill) {
554 rfkill_unregister(wifi_rfkill);
555 rfkill_destroy(wifi_rfkill);
556 }
557 if (bluetooth_rfkill) {
558 rfkill_unregister(bluetooth_rfkill);
559 rfkill_destroy(bluetooth_rfkill);
560 }
561 if (wwan_rfkill) {
562 rfkill_unregister(wwan_rfkill);
563 rfkill_destroy(wwan_rfkill);
564 }
565}
566
567static int dell_send_intensity(struct backlight_device *bd) 371static int dell_send_intensity(struct backlight_device *bd)
568{ 372{
569 int ret = 0; 373 int ret = 0;
@@ -655,30 +459,6 @@ static void touchpad_led_exit(void)
655 led_classdev_unregister(&touchpad_led); 459 led_classdev_unregister(&touchpad_led);
656} 460}
657 461
658static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
659 struct serio *port)
660{
661 static bool extended;
662
663 if (str & 0x20)
664 return false;
665
666 if (unlikely(data == 0xe0)) {
667 extended = true;
668 return false;
669 } else if (unlikely(extended)) {
670 switch (data) {
671 case 0x8:
672 schedule_delayed_work(&dell_rfkill_work,
673 round_jiffies_relative(HZ));
674 break;
675 }
676 extended = false;
677 }
678
679 return false;
680}
681
682static int __init dell_init(void) 462static int __init dell_init(void)
683{ 463{
684 int max_intensity = 0; 464 int max_intensity = 0;
@@ -720,26 +500,10 @@ static int __init dell_init(void)
720 goto fail_buffer; 500 goto fail_buffer;
721 buffer = page_address(bufferpage); 501 buffer = page_address(bufferpage);
722 502
723 ret = dell_setup_rfkill();
724
725 if (ret) {
726 pr_warn("Unable to setup rfkill\n");
727 goto fail_rfkill;
728 }
729
730 ret = i8042_install_filter(dell_laptop_i8042_filter);
731 if (ret) {
732 pr_warn("Unable to install key filter\n");
733 goto fail_filter;
734 }
735
736 if (quirks && quirks->touchpad_led) 503 if (quirks && quirks->touchpad_led)
737 touchpad_led_init(&platform_device->dev); 504 touchpad_led_init(&platform_device->dev);
738 505
739 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); 506 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
740 if (dell_laptop_dir != NULL)
741 debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
742 &dell_debugfs_fops);
743 507
744#ifdef CONFIG_ACPI 508#ifdef CONFIG_ACPI
745 /* In the event of an ACPI backlight being available, don't 509 /* In the event of an ACPI backlight being available, don't
@@ -782,11 +546,6 @@ static int __init dell_init(void)
782 return 0; 546 return 0;
783 547
784fail_backlight: 548fail_backlight:
785 i8042_remove_filter(dell_laptop_i8042_filter);
786 cancel_delayed_work_sync(&dell_rfkill_work);
787fail_filter:
788 dell_cleanup_rfkill();
789fail_rfkill:
790 free_page((unsigned long)bufferpage); 549 free_page((unsigned long)bufferpage);
791fail_buffer: 550fail_buffer:
792 platform_device_del(platform_device); 551 platform_device_del(platform_device);
@@ -804,10 +563,7 @@ static void __exit dell_exit(void)
804 debugfs_remove_recursive(dell_laptop_dir); 563 debugfs_remove_recursive(dell_laptop_dir);
805 if (quirks && quirks->touchpad_led) 564 if (quirks && quirks->touchpad_led)
806 touchpad_led_exit(); 565 touchpad_led_exit();
807 i8042_remove_filter(dell_laptop_i8042_filter);
808 cancel_delayed_work_sync(&dell_rfkill_work);
809 backlight_device_unregister(dell_backlight_device); 566 backlight_device_unregister(dell_backlight_device);
810 dell_cleanup_rfkill();
811 if (platform_device) { 567 if (platform_device) {
812 platform_device_unregister(platform_device); 568 platform_device_unregister(platform_device);
813 platform_driver_unregister(&platform_driver); 569 platform_driver_unregister(&platform_driver);
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 580d80a73c3a..da267eae8ba8 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -16,6 +16,8 @@
16 * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA. 16 * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
17 */ 17 */
18 18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
19#include <linux/kernel.h> 21#include <linux/kernel.h>
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/init.h> 23#include <linux/init.h>
@@ -34,7 +36,8 @@
34#define ACPI_FUJITSU_CLASS "fujitsu" 36#define ACPI_FUJITSU_CLASS "fujitsu"
35 37
36#define INVERT_TABLET_MODE_BIT 0x01 38#define INVERT_TABLET_MODE_BIT 0x01
37#define FORCE_TABLET_MODE_IF_UNDOCK 0x02 39#define INVERT_DOCK_STATE_BIT 0x02
40#define FORCE_TABLET_MODE_IF_UNDOCK 0x04
38 41
39#define KEYMAP_LEN 16 42#define KEYMAP_LEN 16
40 43
@@ -161,6 +164,8 @@ static void fujitsu_send_state(void)
161 state = fujitsu_read_register(0xdd); 164 state = fujitsu_read_register(0xdd);
162 165
163 dock = state & 0x02; 166 dock = state & 0x02;
167 if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
168 dock = !dock;
164 169
165 if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) { 170 if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
166 tablet_mode = 1; 171 tablet_mode = 1;
@@ -221,9 +226,6 @@ static int __devinit input_fujitsu_setup(struct device *parent,
221 input_set_capability(idev, EV_SW, SW_DOCK); 226 input_set_capability(idev, EV_SW, SW_DOCK);
222 input_set_capability(idev, EV_SW, SW_TABLET_MODE); 227 input_set_capability(idev, EV_SW, SW_TABLET_MODE);
223 228
224 input_set_capability(idev, EV_SW, SW_DOCK);
225 input_set_capability(idev, EV_SW, SW_TABLET_MODE);
226
227 error = input_register_device(idev); 229 error = input_register_device(idev);
228 if (error) { 230 if (error) {
229 input_free_device(idev); 231 input_free_device(idev);
@@ -275,25 +277,31 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
275 return IRQ_HANDLED; 277 return IRQ_HANDLED;
276} 278}
277 279
278static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi) 280static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi)
279{ 281{
280 printk(KERN_INFO MODULENAME ": %s\n", dmi->ident); 282 pr_info("%s\n", dmi->ident);
281 memcpy(fujitsu.config.keymap, dmi->driver_data, 283 memcpy(fujitsu.config.keymap, dmi->driver_data,
282 sizeof(fujitsu.config.keymap)); 284 sizeof(fujitsu.config.keymap));
285}
286
287static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
288{
289 fujitsu_dmi_common(dmi);
290 fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
283 return 1; 291 return 1;
284} 292}
285 293
286static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi) 294static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
287{ 295{
288 fujitsu_dmi_default(dmi); 296 fujitsu_dmi_common(dmi);
289 fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK; 297 fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
290 fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT; 298 fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT;
291 return 1; 299 return 1;
292} 300}
293 301
294static struct dmi_system_id dmi_ids[] __initconst = { 302static struct dmi_system_id dmi_ids[] __initconst = {
295 { 303 {
296 .callback = fujitsu_dmi_default, 304 .callback = fujitsu_dmi_lifebook,
297 .ident = "Fujitsu Siemens P/T Series", 305 .ident = "Fujitsu Siemens P/T Series",
298 .matches = { 306 .matches = {
299 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 307 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -302,7 +310,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
302 .driver_data = keymap_Lifebook_Tseries 310 .driver_data = keymap_Lifebook_Tseries
303 }, 311 },
304 { 312 {
305 .callback = fujitsu_dmi_default, 313 .callback = fujitsu_dmi_lifebook,
306 .ident = "Fujitsu Lifebook T Series", 314 .ident = "Fujitsu Lifebook T Series",
307 .matches = { 315 .matches = {
308 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 316 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -320,7 +328,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
320 .driver_data = keymap_Stylistic_Tseries 328 .driver_data = keymap_Stylistic_Tseries
321 }, 329 },
322 { 330 {
323 .callback = fujitsu_dmi_default, 331 .callback = fujitsu_dmi_lifebook,
324 .ident = "Fujitsu LifeBook U810", 332 .ident = "Fujitsu LifeBook U810",
325 .matches = { 333 .matches = {
326 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 334 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -347,7 +355,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
347 .driver_data = keymap_Stylistic_ST5xxx 355 .driver_data = keymap_Stylistic_ST5xxx
348 }, 356 },
349 { 357 {
350 .callback = fujitsu_dmi_default, 358 .callback = fujitsu_dmi_lifebook,
351 .ident = "Unknown (using defaults)", 359 .ident = "Unknown (using defaults)",
352 .matches = { 360 .matches = {
353 DMI_MATCH(DMI_SYS_VENDOR, ""), 361 DMI_MATCH(DMI_SYS_VENDOR, ""),
@@ -473,6 +481,6 @@ module_exit(fujitsu_module_exit);
473MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>"); 481MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
474MODULE_DESCRIPTION("Fujitsu tablet pc extras driver"); 482MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
475MODULE_LICENSE("GPL"); 483MODULE_LICENSE("GPL");
476MODULE_VERSION("2.4"); 484MODULE_VERSION("2.5");
477 485
478MODULE_DEVICE_TABLE(acpi, fujitsu_ids); 486MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 7387f97a2941..24a3ae065f1b 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -2,7 +2,7 @@
2 * hdaps.c - driver for IBM's Hard Drive Active Protection System 2 * hdaps.c - driver for IBM's Hard Drive Active Protection System
3 * 3 *
4 * Copyright (C) 2005 Robert Love <rml@novell.com> 4 * Copyright (C) 2005 Robert Love <rml@novell.com>
5 * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com> 5 * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
6 * 6 *
7 * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads 7 * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
8 * starting with the R40, T41, and X40. It provides a basic two-axis 8 * starting with the R40, T41, and X40. It provides a basic two-axis
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e2faa3cbb792..387183a2d6dd 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -634,6 +634,8 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
634 RFKILL_TYPE_WLAN, 634 RFKILL_TYPE_WLAN,
635 &hp_wmi_rfkill_ops, 635 &hp_wmi_rfkill_ops,
636 (void *) HPWMI_WIFI); 636 (void *) HPWMI_WIFI);
637 if (!wifi_rfkill)
638 return -ENOMEM;
637 rfkill_init_sw_state(wifi_rfkill, 639 rfkill_init_sw_state(wifi_rfkill,
638 hp_wmi_get_sw_state(HPWMI_WIFI)); 640 hp_wmi_get_sw_state(HPWMI_WIFI));
639 rfkill_set_hw_state(wifi_rfkill, 641 rfkill_set_hw_state(wifi_rfkill,
@@ -648,6 +650,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
648 RFKILL_TYPE_BLUETOOTH, 650 RFKILL_TYPE_BLUETOOTH,
649 &hp_wmi_rfkill_ops, 651 &hp_wmi_rfkill_ops,
650 (void *) HPWMI_BLUETOOTH); 652 (void *) HPWMI_BLUETOOTH);
653 if (!bluetooth_rfkill) {
654 err = -ENOMEM;
655 goto register_wifi_error;
656 }
651 rfkill_init_sw_state(bluetooth_rfkill, 657 rfkill_init_sw_state(bluetooth_rfkill,
652 hp_wmi_get_sw_state(HPWMI_BLUETOOTH)); 658 hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
653 rfkill_set_hw_state(bluetooth_rfkill, 659 rfkill_set_hw_state(bluetooth_rfkill,
@@ -662,6 +668,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
662 RFKILL_TYPE_WWAN, 668 RFKILL_TYPE_WWAN,
663 &hp_wmi_rfkill_ops, 669 &hp_wmi_rfkill_ops,
664 (void *) HPWMI_WWAN); 670 (void *) HPWMI_WWAN);
671 if (!wwan_rfkill) {
672 err = -ENOMEM;
673 goto register_bluetooth_error;
674 }
665 rfkill_init_sw_state(wwan_rfkill, 675 rfkill_init_sw_state(wwan_rfkill,
666 hp_wmi_get_sw_state(HPWMI_WWAN)); 676 hp_wmi_get_sw_state(HPWMI_WWAN));
667 rfkill_set_hw_state(wwan_rfkill, 677 rfkill_set_hw_state(wwan_rfkill,
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ac902f7a9baa..4f20f8dd3d7c 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -194,7 +194,6 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
194/* 194/*
195 * debugfs 195 * debugfs
196 */ 196 */
197#define DEBUGFS_EVENT_LEN (4096)
198static int debugfs_status_show(struct seq_file *s, void *data) 197static int debugfs_status_show(struct seq_file *s, void *data)
199{ 198{
200 unsigned long value; 199 unsigned long value;
@@ -315,7 +314,7 @@ static int __devinit ideapad_debugfs_init(struct ideapad_private *priv)
315 node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL, 314 node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
316 &debugfs_status_fops); 315 &debugfs_status_fops);
317 if (!node) { 316 if (!node) {
318 pr_err("failed to create event in debugfs"); 317 pr_err("failed to create status in debugfs");
319 goto errout; 318 goto errout;
320 } 319 }
321 320
@@ -785,6 +784,10 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
785 case 9: 784 case 9:
786 ideapad_sync_rfk_state(priv); 785 ideapad_sync_rfk_state(priv);
787 break; 786 break;
787 case 13:
788 case 6:
789 ideapad_input_report(priv, vpc_bit);
790 break;
788 case 4: 791 case 4:
789 ideapad_backlight_notify_brightness(priv); 792 ideapad_backlight_notify_brightness(priv);
790 break; 793 break;
@@ -795,7 +798,7 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
795 ideapad_backlight_notify_power(priv); 798 ideapad_backlight_notify_power(priv);
796 break; 799 break;
797 default: 800 default:
798 ideapad_input_report(priv, vpc_bit); 801 pr_info("Unknown event: %lu\n", vpc_bit);
799 } 802 }
800 } 803 }
801 } 804 }
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8a51795aa02a..210d4ae547c2 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -141,6 +141,27 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
141 "(default: 0)"); 141 "(default: 0)");
142 142
143static void sony_nc_kbd_backlight_resume(void); 143static void sony_nc_kbd_backlight_resume(void);
144static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
145 unsigned int handle);
146static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
147
148static int sony_nc_battery_care_setup(struct platform_device *pd,
149 unsigned int handle);
150static void sony_nc_battery_care_cleanup(struct platform_device *pd);
151
152static int sony_nc_thermal_setup(struct platform_device *pd);
153static void sony_nc_thermal_cleanup(struct platform_device *pd);
154static void sony_nc_thermal_resume(void);
155
156static int sony_nc_lid_resume_setup(struct platform_device *pd);
157static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
158
159static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
160static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
161
162static int sony_nc_touchpad_setup(struct platform_device *pd,
163 unsigned int handle);
164static void sony_nc_touchpad_cleanup(struct platform_device *pd);
144 165
145enum sony_nc_rfkill { 166enum sony_nc_rfkill {
146 SONY_WIFI, 167 SONY_WIFI,
@@ -153,6 +174,9 @@ enum sony_nc_rfkill {
153static int sony_rfkill_handle; 174static int sony_rfkill_handle;
154static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL]; 175static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
155static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900}; 176static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
177static int sony_nc_rfkill_setup(struct acpi_device *device,
178 unsigned int handle);
179static void sony_nc_rfkill_cleanup(void);
156static void sony_nc_rfkill_update(void); 180static void sony_nc_rfkill_update(void);
157 181
158/*********** Input Devices ***********/ 182/*********** Input Devices ***********/
@@ -691,59 +715,97 @@ static struct acpi_device *sony_nc_acpi_device = NULL;
691 715
692/* 716/*
693 * acpi_evaluate_object wrappers 717 * acpi_evaluate_object wrappers
718 * all useful calls into SNC methods take one or zero parameters and return
719 * integers or arrays.
694 */ 720 */
695static int acpi_callgetfunc(acpi_handle handle, char *name, int *result) 721static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
722 u64 *value)
696{ 723{
697 struct acpi_buffer output; 724 union acpi_object *result = NULL;
698 union acpi_object out_obj; 725 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
699 acpi_status status; 726 acpi_status status;
700 727
701 output.length = sizeof(out_obj); 728 if (value) {
702 output.pointer = &out_obj; 729 struct acpi_object_list params;
730 union acpi_object in;
731 in.type = ACPI_TYPE_INTEGER;
732 in.integer.value = *value;
733 params.count = 1;
734 params.pointer = &in;
735 status = acpi_evaluate_object(handle, method, &params, &output);
736 dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method,
737 (unsigned int)(*value >> 32),
738 (unsigned int)*value & 0xffffffff);
739 } else {
740 status = acpi_evaluate_object(handle, method, NULL, &output);
741 dprintk("__call_snc_method: [%s]\n", method);
742 }
703 743
704 status = acpi_evaluate_object(handle, name, NULL, &output); 744 if (ACPI_FAILURE(status)) {
705 if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) { 745 pr_err("Failed to evaluate [%s]\n", method);
706 *result = out_obj.integer.value; 746 return NULL;
707 return 0;
708 } 747 }
709 748
710 pr_warn("acpi_callreadfunc failed\n"); 749 result = (union acpi_object *) output.pointer;
750 if (!result)
751 dprintk("No return object [%s]\n", method);
711 752
712 return -1; 753 return result;
713} 754}
714 755
715static int acpi_callsetfunc(acpi_handle handle, char *name, int value, 756static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
716 int *result) 757 int *result)
717{ 758{
718 struct acpi_object_list params; 759 union acpi_object *object = NULL;
719 union acpi_object in_obj; 760 if (value) {
720 struct acpi_buffer output; 761 u64 v = *value;
721 union acpi_object out_obj; 762 object = __call_snc_method(handle, name, &v);
722 acpi_status status; 763 } else
723 764 object = __call_snc_method(handle, name, NULL);
724 params.count = 1;
725 params.pointer = &in_obj;
726 in_obj.type = ACPI_TYPE_INTEGER;
727 in_obj.integer.value = value;
728 765
729 output.length = sizeof(out_obj); 766 if (!object)
730 output.pointer = &out_obj; 767 return -EINVAL;
731 768
732 status = acpi_evaluate_object(handle, name, &params, &output); 769 if (object->type != ACPI_TYPE_INTEGER) {
733 if (status == AE_OK) { 770 pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
734 if (result != NULL) { 771 ACPI_TYPE_INTEGER, object->type);
735 if (out_obj.type != ACPI_TYPE_INTEGER) { 772 kfree(object);
736 pr_warn("acpi_evaluate_object bad return type\n"); 773 return -EINVAL;
737 return -1;
738 }
739 *result = out_obj.integer.value;
740 }
741 return 0;
742 } 774 }
743 775
744 pr_warn("acpi_evaluate_object failed\n"); 776 if (result)
777 *result = object->integer.value;
778
779 kfree(object);
780 return 0;
781}
782
783#define MIN(a, b) (a > b ? b : a)
784static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
785 void *buffer, size_t buflen)
786{
787 size_t len = len;
788 union acpi_object *object = __call_snc_method(handle, name, value);
789
790 if (!object)
791 return -EINVAL;
792
793 if (object->type == ACPI_TYPE_BUFFER)
794 len = MIN(buflen, object->buffer.length);
795
796 else if (object->type == ACPI_TYPE_INTEGER)
797 len = MIN(buflen, sizeof(object->integer.value));
798
799 else {
800 pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
801 ACPI_TYPE_BUFFER, object->type);
802 kfree(object);
803 return -EINVAL;
804 }
745 805
746 return -1; 806 memcpy(buffer, object->buffer.pointer, len);
807 kfree(object);
808 return 0;
747} 809}
748 810
749struct sony_nc_handles { 811struct sony_nc_handles {
@@ -770,16 +832,17 @@ static ssize_t sony_nc_handles_show(struct device *dev,
770 832
771static int sony_nc_handles_setup(struct platform_device *pd) 833static int sony_nc_handles_setup(struct platform_device *pd)
772{ 834{
773 int i; 835 int i, r, result, arg;
774 int result;
775 836
776 handles = kzalloc(sizeof(*handles), GFP_KERNEL); 837 handles = kzalloc(sizeof(*handles), GFP_KERNEL);
777 if (!handles) 838 if (!handles)
778 return -ENOMEM; 839 return -ENOMEM;
779 840
780 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { 841 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
781 if (!acpi_callsetfunc(sony_nc_acpi_handle, 842 arg = i + 0x20;
782 "SN00", i + 0x20, &result)) { 843 r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg,
844 &result);
845 if (!r) {
783 dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n", 846 dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
784 result, i); 847 result, i);
785 handles->cap[i] = result; 848 handles->cap[i] = result;
@@ -819,8 +882,8 @@ static int sony_find_snc_handle(int handle)
819 int i; 882 int i;
820 883
821 /* not initialized yet, return early */ 884 /* not initialized yet, return early */
822 if (!handles) 885 if (!handles || !handle)
823 return -1; 886 return -EINVAL;
824 887
825 for (i = 0; i < 0x10; i++) { 888 for (i = 0; i < 0x10; i++) {
826 if (handles->cap[i] == handle) { 889 if (handles->cap[i] == handle) {
@@ -830,21 +893,20 @@ static int sony_find_snc_handle(int handle)
830 } 893 }
831 } 894 }
832 dprintk("handle 0x%.4x not found\n", handle); 895 dprintk("handle 0x%.4x not found\n", handle);
833 return -1; 896 return -EINVAL;
834} 897}
835 898
836static int sony_call_snc_handle(int handle, int argument, int *result) 899static int sony_call_snc_handle(int handle, int argument, int *result)
837{ 900{
838 int ret = 0; 901 int arg, ret = 0;
839 int offset = sony_find_snc_handle(handle); 902 int offset = sony_find_snc_handle(handle);
840 903
841 if (offset < 0) 904 if (offset < 0)
842 return -1; 905 return offset;
843 906
844 ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, 907 arg = offset | argument;
845 result); 908 ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
846 dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument, 909 dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);
847 *result);
848 return ret; 910 return ret;
849} 911}
850 912
@@ -889,14 +951,16 @@ static int boolean_validate(const int direction, const int value)
889static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr, 951static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
890 char *buffer) 952 char *buffer)
891{ 953{
892 int value; 954 int value, ret = 0;
893 struct sony_nc_value *item = 955 struct sony_nc_value *item =
894 container_of(attr, struct sony_nc_value, devattr); 956 container_of(attr, struct sony_nc_value, devattr);
895 957
896 if (!*item->acpiget) 958 if (!*item->acpiget)
897 return -EIO; 959 return -EIO;
898 960
899 if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0) 961 ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
962 &value);
963 if (ret < 0)
900 return -EIO; 964 return -EIO;
901 965
902 if (item->validate) 966 if (item->validate)
@@ -909,7 +973,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
909 struct device_attribute *attr, 973 struct device_attribute *attr,
910 const char *buffer, size_t count) 974 const char *buffer, size_t count)
911{ 975{
912 int value; 976 unsigned long value = 0;
977 int ret = 0;
913 struct sony_nc_value *item = 978 struct sony_nc_value *item =
914 container_of(attr, struct sony_nc_value, devattr); 979 container_of(attr, struct sony_nc_value, devattr);
915 980
@@ -919,7 +984,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
919 if (count > 31) 984 if (count > 31)
920 return -EINVAL; 985 return -EINVAL;
921 986
922 value = simple_strtoul(buffer, NULL, 10); 987 if (kstrtoul(buffer, 10, &value))
988 return -EINVAL;
923 989
924 if (item->validate) 990 if (item->validate)
925 value = item->validate(SNC_VALIDATE_IN, value); 991 value = item->validate(SNC_VALIDATE_IN, value);
@@ -927,8 +993,11 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
927 if (value < 0) 993 if (value < 0)
928 return value; 994 return value;
929 995
930 if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0) 996 ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
997 (int *)&value, NULL);
998 if (ret < 0)
931 return -EIO; 999 return -EIO;
1000
932 item->value = value; 1001 item->value = value;
933 item->valid = 1; 1002 item->valid = 1;
934 return count; 1003 return count;
@@ -948,15 +1017,15 @@ struct sony_backlight_props sony_bl_props;
948 1017
949static int sony_backlight_update_status(struct backlight_device *bd) 1018static int sony_backlight_update_status(struct backlight_device *bd)
950{ 1019{
951 return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", 1020 int arg = bd->props.brightness + 1;
952 bd->props.brightness + 1, NULL); 1021 return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
953} 1022}
954 1023
955static int sony_backlight_get_brightness(struct backlight_device *bd) 1024static int sony_backlight_get_brightness(struct backlight_device *bd)
956{ 1025{
957 int value; 1026 int value;
958 1027
959 if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) 1028 if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
960 return 0; 1029 return 0;
961 /* brightness levels are 1-based, while backlight ones are 0-based */ 1030 /* brightness levels are 1-based, while backlight ones are 0-based */
962 return value - 1; 1031 return value - 1;
@@ -1024,10 +1093,14 @@ static struct sony_nc_event sony_100_events[] = {
1024 { 0x06, SONYPI_EVENT_FNKEY_RELEASED }, 1093 { 0x06, SONYPI_EVENT_FNKEY_RELEASED },
1025 { 0x87, SONYPI_EVENT_FNKEY_F7 }, 1094 { 0x87, SONYPI_EVENT_FNKEY_F7 },
1026 { 0x07, SONYPI_EVENT_FNKEY_RELEASED }, 1095 { 0x07, SONYPI_EVENT_FNKEY_RELEASED },
1096 { 0x88, SONYPI_EVENT_FNKEY_F8 },
1097 { 0x08, SONYPI_EVENT_FNKEY_RELEASED },
1027 { 0x89, SONYPI_EVENT_FNKEY_F9 }, 1098 { 0x89, SONYPI_EVENT_FNKEY_F9 },
1028 { 0x09, SONYPI_EVENT_FNKEY_RELEASED }, 1099 { 0x09, SONYPI_EVENT_FNKEY_RELEASED },
1029 { 0x8A, SONYPI_EVENT_FNKEY_F10 }, 1100 { 0x8A, SONYPI_EVENT_FNKEY_F10 },
1030 { 0x0A, SONYPI_EVENT_FNKEY_RELEASED }, 1101 { 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
1102 { 0x8B, SONYPI_EVENT_FNKEY_F11 },
1103 { 0x0B, SONYPI_EVENT_FNKEY_RELEASED },
1031 { 0x8C, SONYPI_EVENT_FNKEY_F12 }, 1104 { 0x8C, SONYPI_EVENT_FNKEY_F12 },
1032 { 0x0C, SONYPI_EVENT_FNKEY_RELEASED }, 1105 { 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
1033 { 0x9d, SONYPI_EVENT_ZOOM_PRESSED }, 1106 { 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
@@ -1063,63 +1136,116 @@ static struct sony_nc_event sony_127_events[] = {
1063 { 0, 0 }, 1136 { 0, 0 },
1064}; 1137};
1065 1138
1139static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
1140{
1141 int ret = -EINVAL;
1142 unsigned int result = 0;
1143 struct sony_nc_event *key_event;
1144
1145 if (sony_call_snc_handle(handle, 0x200, &result)) {
1146 dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle,
1147 event);
1148 return -EINVAL;
1149 }
1150
1151 result &= 0xFF;
1152
1153 if (handle == 0x0100)
1154 key_event = sony_100_events;
1155 else
1156 key_event = sony_127_events;
1157
1158 for (; key_event->data; key_event++) {
1159 if (key_event->data == result) {
1160 ret = key_event->event;
1161 break;
1162 }
1163 }
1164
1165 if (!key_event->data)
1166 pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n",
1167 event, result, handle);
1168
1169 return ret;
1170}
1171
1066/* 1172/*
1067 * ACPI callbacks 1173 * ACPI callbacks
1068 */ 1174 */
1069static void sony_nc_notify(struct acpi_device *device, u32 event) 1175static void sony_nc_notify(struct acpi_device *device, u32 event)
1070{ 1176{
1071 u32 ev = event; 1177 u32 real_ev = event;
1178 u8 ev_type = 0;
1179 dprintk("sony_nc_notify, event: 0x%.2x\n", event);
1180
1181 if (event >= 0x90) {
1182 unsigned int result = 0;
1183 unsigned int arg = 0;
1184 unsigned int handle = 0;
1185 unsigned int offset = event - 0x90;
1186
1187 if (offset >= ARRAY_SIZE(handles->cap)) {
1188 pr_err("Event 0x%x outside of capabilities list\n",
1189 event);
1190 return;
1191 }
1192 handle = handles->cap[offset];
1193
1194 /* list of handles known for generating events */
1195 switch (handle) {
1196 /* hotkey event */
1197 case 0x0100:
1198 case 0x0127:
1199 ev_type = 1;
1200 real_ev = sony_nc_hotkeys_decode(event, handle);
1201
1202 if (real_ev > 0)
1203 sony_laptop_report_input_event(real_ev);
1204 else
1205 /* restore the original event for reporting */
1206 real_ev = event;
1072 1207
1073 if (ev >= 0x90) { 1208 break;
1074 /* New-style event */
1075 int result;
1076 int key_handle = 0;
1077 ev -= 0x90;
1078
1079 if (sony_find_snc_handle(0x100) == ev)
1080 key_handle = 0x100;
1081 if (sony_find_snc_handle(0x127) == ev)
1082 key_handle = 0x127;
1083
1084 if (key_handle) {
1085 struct sony_nc_event *key_event;
1086
1087 if (sony_call_snc_handle(key_handle, 0x200, &result)) {
1088 dprintk("sony_nc_notify, unable to decode"
1089 " event 0x%.2x 0x%.2x\n", key_handle,
1090 ev);
1091 /* restore the original event */
1092 ev = event;
1093 } else {
1094 ev = result & 0xFF;
1095
1096 if (key_handle == 0x100)
1097 key_event = sony_100_events;
1098 else
1099 key_event = sony_127_events;
1100
1101 for (; key_event->data; key_event++) {
1102 if (key_event->data == ev) {
1103 ev = key_event->event;
1104 break;
1105 }
1106 }
1107 1209
1108 if (!key_event->data) 1210 /* wlan switch */
1109 pr_info("Unknown event: 0x%x 0x%x\n", 1211 case 0x0124:
1110 key_handle, ev); 1212 case 0x0135:
1111 else 1213 /* events on this handle are reported when the
1112 sony_laptop_report_input_event(ev); 1214 * switch changes position or for battery
1113 } 1215 * events. We'll notify both of them but only
1114 } else if (sony_find_snc_handle(sony_rfkill_handle) == ev) { 1216 * update the rfkill device status when the
1115 sony_nc_rfkill_update(); 1217 * switch is moved.
1116 return; 1218 */
1219 ev_type = 2;
1220 sony_call_snc_handle(handle, 0x0100, &result);
1221 real_ev = result & 0x03;
1222
1223 /* hw switch event */
1224 if (real_ev == 1)
1225 sony_nc_rfkill_update();
1226
1227 break;
1228
1229 default:
1230 dprintk("Unknown event 0x%x for handle 0x%x\n",
1231 event, handle);
1232 break;
1117 } 1233 }
1118 } else
1119 sony_laptop_report_input_event(ev);
1120 1234
1121 dprintk("sony_nc_notify, event: 0x%.2x\n", ev); 1235 /* clear the event (and the event reason when present) */
1122 acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); 1236 arg = 1 << offset;
1237 sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result);
1238
1239 } else {
1240 /* old style event */
1241 ev_type = 1;
1242 sony_laptop_report_input_event(real_ev);
1243 }
1244
1245 acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
1246
1247 acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
1248 dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
1123} 1249}
1124 1250
1125static acpi_status sony_walk_callback(acpi_handle handle, u32 level, 1251static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
@@ -1140,20 +1266,190 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
1140/* 1266/*
1141 * ACPI device 1267 * ACPI device
1142 */ 1268 */
1143static int sony_nc_function_setup(struct acpi_device *device) 1269static void sony_nc_function_setup(struct acpi_device *device,
1270 struct platform_device *pf_device)
1144{ 1271{
1145 int result; 1272 unsigned int i, result, bitmask, arg;
1273
1274 if (!handles)
1275 return;
1276
1277 /* setup found handles here */
1278 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
1279 unsigned int handle = handles->cap[i];
1280
1281 if (!handle)
1282 continue;
1283
1284 dprintk("setting up handle 0x%.4x\n", handle);
1285
1286 switch (handle) {
1287 case 0x0100:
1288 case 0x0101:
1289 case 0x0127:
1290 /* setup hotkeys */
1291 sony_call_snc_handle(handle, 0, &result);
1292 break;
1293 case 0x0102:
1294 /* setup hotkeys */
1295 sony_call_snc_handle(handle, 0x100, &result);
1296 break;
1297 case 0x0105:
1298 case 0x0148:
1299 /* touchpad enable/disable */
1300 result = sony_nc_touchpad_setup(pf_device, handle);
1301 if (result)
1302 pr_err("couldn't set up touchpad control function (%d)\n",
1303 result);
1304 break;
1305 case 0x0115:
1306 case 0x0136:
1307 case 0x013f:
1308 result = sony_nc_battery_care_setup(pf_device, handle);
1309 if (result)
1310 pr_err("couldn't set up battery care function (%d)\n",
1311 result);
1312 break;
1313 case 0x0119:
1314 result = sony_nc_lid_resume_setup(pf_device);
1315 if (result)
1316 pr_err("couldn't set up lid resume function (%d)\n",
1317 result);
1318 break;
1319 case 0x0122:
1320 result = sony_nc_thermal_setup(pf_device);
1321 if (result)
1322 pr_err("couldn't set up thermal profile function (%d)\n",
1323 result);
1324 break;
1325 case 0x0131:
1326 result = sony_nc_highspeed_charging_setup(pf_device);
1327 if (result)
1328 pr_err("couldn't set up high speed charging function (%d)\n",
1329 result);
1330 break;
1331 case 0x0124:
1332 case 0x0135:
1333 result = sony_nc_rfkill_setup(device, handle);
1334 if (result)
1335 pr_err("couldn't set up rfkill support (%d)\n",
1336 result);
1337 break;
1338 case 0x0137:
1339 case 0x0143:
1340 result = sony_nc_kbd_backlight_setup(pf_device, handle);
1341 if (result)
1342 pr_err("couldn't set up keyboard backlight function (%d)\n",
1343 result);
1344 break;
1345 default:
1346 continue;
1347 }
1348 }
1146 1349
1147 /* Enable all events */ 1350 /* Enable all events */
1148 acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result); 1351 arg = 0x10;
1352 if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
1353 sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
1354 &result);
1355}
1356
1357static void sony_nc_function_cleanup(struct platform_device *pd)
1358{
1359 unsigned int i, result, bitmask, handle;
1149 1360
1150 /* Setup hotkeys */ 1361 /* get enabled events and disable them */
1151 sony_call_snc_handle(0x0100, 0, &result); 1362 sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
1152 sony_call_snc_handle(0x0101, 0, &result); 1363 sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
1153 sony_call_snc_handle(0x0102, 0x100, &result);
1154 sony_call_snc_handle(0x0127, 0, &result);
1155 1364
1156 return 0; 1365 /* cleanup handles here */
1366 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
1367
1368 handle = handles->cap[i];
1369
1370 if (!handle)
1371 continue;
1372
1373 switch (handle) {
1374 case 0x0105:
1375 case 0x0148:
1376 sony_nc_touchpad_cleanup(pd);
1377 break;
1378 case 0x0115:
1379 case 0x0136:
1380 case 0x013f:
1381 sony_nc_battery_care_cleanup(pd);
1382 break;
1383 case 0x0119:
1384 sony_nc_lid_resume_cleanup(pd);
1385 break;
1386 case 0x0122:
1387 sony_nc_thermal_cleanup(pd);
1388 break;
1389 case 0x0131:
1390 sony_nc_highspeed_charging_cleanup(pd);
1391 break;
1392 case 0x0124:
1393 case 0x0135:
1394 sony_nc_rfkill_cleanup();
1395 break;
1396 case 0x0137:
1397 case 0x0143:
1398 sony_nc_kbd_backlight_cleanup(pd);
1399 break;
1400 default:
1401 continue;
1402 }
1403 }
1404
1405 /* finally cleanup the handles list */
1406 sony_nc_handles_cleanup(pd);
1407}
1408
1409static void sony_nc_function_resume(void)
1410{
1411 unsigned int i, result, bitmask, arg;
1412
1413 dprintk("Resuming SNC device\n");
1414
1415 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
1416 unsigned int handle = handles->cap[i];
1417
1418 if (!handle)
1419 continue;
1420
1421 switch (handle) {
1422 case 0x0100:
1423 case 0x0101:
1424 case 0x0127:
1425 /* re-enable hotkeys */
1426 sony_call_snc_handle(handle, 0, &result);
1427 break;
1428 case 0x0102:
1429 /* re-enable hotkeys */
1430 sony_call_snc_handle(handle, 0x100, &result);
1431 break;
1432 case 0x0122:
1433 sony_nc_thermal_resume();
1434 break;
1435 case 0x0124:
1436 case 0x0135:
1437 sony_nc_rfkill_update();
1438 break;
1439 case 0x0137:
1440 case 0x0143:
1441 sony_nc_kbd_backlight_resume();
1442 break;
1443 default:
1444 continue;
1445 }
1446 }
1447
1448 /* Enable all events */
1449 arg = 0x10;
1450 if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
1451 sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
1452 &result);
1157} 1453}
1158 1454
1159static int sony_nc_resume(struct acpi_device *device) 1455static int sony_nc_resume(struct acpi_device *device)
@@ -1166,8 +1462,8 @@ static int sony_nc_resume(struct acpi_device *device)
1166 1462
1167 if (!item->valid) 1463 if (!item->valid)
1168 continue; 1464 continue;
1169 ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, 1465 ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
1170 item->value, NULL); 1466 &item->value, NULL);
1171 if (ret < 0) { 1467 if (ret < 0) {
1172 pr_err("%s: %d\n", __func__, ret); 1468 pr_err("%s: %d\n", __func__, ret);
1173 break; 1469 break;
@@ -1176,21 +1472,14 @@ static int sony_nc_resume(struct acpi_device *device)
1176 1472
1177 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", 1473 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
1178 &handle))) { 1474 &handle))) {
1179 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) 1475 int arg = 1;
1476 if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
1180 dprintk("ECON Method failed\n"); 1477 dprintk("ECON Method failed\n");
1181 } 1478 }
1182 1479
1183 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", 1480 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
1184 &handle))) { 1481 &handle)))
1185 dprintk("Doing SNC setup\n"); 1482 sony_nc_function_resume();
1186 sony_nc_function_setup(device);
1187 }
1188
1189 /* re-read rfkill state */
1190 sony_nc_rfkill_update();
1191
1192 /* restore kbd backlight states */
1193 sony_nc_kbd_backlight_resume();
1194 1483
1195 return 0; 1484 return 0;
1196} 1485}
@@ -1213,7 +1502,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
1213 int argument = sony_rfkill_address[(long) data] + 0x100; 1502 int argument = sony_rfkill_address[(long) data] + 0x100;
1214 1503
1215 if (!blocked) 1504 if (!blocked)
1216 argument |= 0xff0000; 1505 argument |= 0x030000;
1217 1506
1218 return sony_call_snc_handle(sony_rfkill_handle, argument, &result); 1507 return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
1219} 1508}
@@ -1230,7 +1519,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
1230 enum rfkill_type type; 1519 enum rfkill_type type;
1231 const char *name; 1520 const char *name;
1232 int result; 1521 int result;
1233 bool hwblock; 1522 bool hwblock, swblock;
1234 1523
1235 switch (nc_type) { 1524 switch (nc_type) {
1236 case SONY_WIFI: 1525 case SONY_WIFI:
@@ -1258,8 +1547,21 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
1258 if (!rfk) 1547 if (!rfk)
1259 return -ENOMEM; 1548 return -ENOMEM;
1260 1549
1261 sony_call_snc_handle(sony_rfkill_handle, 0x200, &result); 1550 if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
1551 rfkill_destroy(rfk);
1552 return -1;
1553 }
1262 hwblock = !(result & 0x1); 1554 hwblock = !(result & 0x1);
1555
1556 if (sony_call_snc_handle(sony_rfkill_handle,
1557 sony_rfkill_address[nc_type],
1558 &result) < 0) {
1559 rfkill_destroy(rfk);
1560 return -1;
1561 }
1562 swblock = !(result & 0x2);
1563
1564 rfkill_init_sw_state(rfk, swblock);
1263 rfkill_set_hw_state(rfk, hwblock); 1565 rfkill_set_hw_state(rfk, hwblock);
1264 1566
1265 err = rfkill_register(rfk); 1567 err = rfkill_register(rfk);
@@ -1295,101 +1597,79 @@ static void sony_nc_rfkill_update(void)
1295 1597
1296 sony_call_snc_handle(sony_rfkill_handle, argument, &result); 1598 sony_call_snc_handle(sony_rfkill_handle, argument, &result);
1297 rfkill_set_states(sony_rfkill_devices[i], 1599 rfkill_set_states(sony_rfkill_devices[i],
1298 !(result & 0xf), false); 1600 !(result & 0x2), false);
1299 } 1601 }
1300} 1602}
1301 1603
1302static void sony_nc_rfkill_setup(struct acpi_device *device) 1604static int sony_nc_rfkill_setup(struct acpi_device *device,
1605 unsigned int handle)
1303{ 1606{
1304 int offset; 1607 u64 offset;
1305 u8 dev_code, i; 1608 int i;
1306 acpi_status status; 1609 unsigned char buffer[32] = { 0 };
1307 struct acpi_object_list params;
1308 union acpi_object in_obj;
1309 union acpi_object *device_enum;
1310 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1311
1312 offset = sony_find_snc_handle(0x124);
1313 if (offset == -1) {
1314 offset = sony_find_snc_handle(0x135);
1315 if (offset == -1)
1316 return;
1317 else
1318 sony_rfkill_handle = 0x135;
1319 } else
1320 sony_rfkill_handle = 0x124;
1321 dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);
1322
1323 /* need to read the whole buffer returned by the acpi call to SN06
1324 * here otherwise we may miss some features
1325 */
1326 params.count = 1;
1327 params.pointer = &in_obj;
1328 in_obj.type = ACPI_TYPE_INTEGER;
1329 in_obj.integer.value = offset;
1330 status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
1331 &buffer);
1332 if (ACPI_FAILURE(status)) {
1333 dprintk("Radio device enumeration failed\n");
1334 return;
1335 }
1336
1337 device_enum = (union acpi_object *) buffer.pointer;
1338 if (!device_enum) {
1339 pr_err("No SN06 return object\n");
1340 goto out_no_enum;
1341 }
1342 if (device_enum->type != ACPI_TYPE_BUFFER) {
1343 pr_err("Invalid SN06 return object 0x%.2x\n",
1344 device_enum->type);
1345 goto out_no_enum;
1346 }
1347 1610
1348 /* the buffer is filled with magic numbers describing the devices 1611 offset = sony_find_snc_handle(handle);
1349 * available, 0xff terminates the enumeration 1612 sony_rfkill_handle = handle;
1613
1614 i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
1615 32);
1616 if (i < 0)
1617 return i;
1618
1619 /* The buffer is filled with magic numbers describing the devices
1620 * available, 0xff terminates the enumeration.
1621 * Known codes:
1622 * 0x00 WLAN
1623 * 0x10 BLUETOOTH
1624 * 0x20 WWAN GPRS-EDGE
1625 * 0x21 WWAN HSDPA
1626 * 0x22 WWAN EV-DO
1627 * 0x23 WWAN GPS
1628 * 0x25 Gobi WWAN no GPS
1629 * 0x26 Gobi WWAN + GPS
1630 * 0x28 Gobi WWAN no GPS
1631 * 0x29 Gobi WWAN + GPS
1632 * 0x30 WIMAX
1633 * 0x50 Gobi WWAN no GPS
1634 * 0x51 Gobi WWAN + GPS
1635 * 0x70 no SIM card slot
1636 * 0x71 SIM card slot
1350 */ 1637 */
1351 for (i = 0; i < device_enum->buffer.length; i++) { 1638 for (i = 0; i < ARRAY_SIZE(buffer); i++) {
1352 1639
1353 dev_code = *(device_enum->buffer.pointer + i); 1640 if (buffer[i] == 0xff)
1354 if (dev_code == 0xff)
1355 break; 1641 break;
1356 1642
1357 dprintk("Radio devices, looking at 0x%.2x\n", dev_code); 1643 dprintk("Radio devices, found 0x%.2x\n", buffer[i]);
1358 1644
1359 if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI]) 1645 if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
1360 sony_nc_setup_rfkill(device, SONY_WIFI); 1646 sony_nc_setup_rfkill(device, SONY_WIFI);
1361 1647
1362 if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH]) 1648 if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
1363 sony_nc_setup_rfkill(device, SONY_BLUETOOTH); 1649 sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
1364 1650
1365 if ((0xf0 & dev_code) == 0x20 && 1651 if (((0xf0 & buffer[i]) == 0x20 ||
1652 (0xf0 & buffer[i]) == 0x50) &&
1366 !sony_rfkill_devices[SONY_WWAN]) 1653 !sony_rfkill_devices[SONY_WWAN])
1367 sony_nc_setup_rfkill(device, SONY_WWAN); 1654 sony_nc_setup_rfkill(device, SONY_WWAN);
1368 1655
1369 if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX]) 1656 if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
1370 sony_nc_setup_rfkill(device, SONY_WIMAX); 1657 sony_nc_setup_rfkill(device, SONY_WIMAX);
1371 } 1658 }
1372 1659 return 0;
1373out_no_enum:
1374 kfree(buffer.pointer);
1375 return;
1376} 1660}
1377 1661
1378/* Keyboard backlight feature */ 1662/* Keyboard backlight feature */
1379#define KBDBL_HANDLER 0x137
1380#define KBDBL_PRESENT 0xB00
1381#define SET_MODE 0xC00
1382#define SET_STATE 0xD00
1383#define SET_TIMEOUT 0xE00
1384
1385struct kbd_backlight { 1663struct kbd_backlight {
1386 int mode; 1664 unsigned int handle;
1387 int timeout; 1665 unsigned int base;
1666 unsigned int mode;
1667 unsigned int timeout;
1388 struct device_attribute mode_attr; 1668 struct device_attribute mode_attr;
1389 struct device_attribute timeout_attr; 1669 struct device_attribute timeout_attr;
1390}; 1670};
1391 1671
1392static struct kbd_backlight *kbdbl_handle; 1672static struct kbd_backlight *kbdbl_ctl;
1393 1673
1394static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) 1674static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
1395{ 1675{
@@ -1398,15 +1678,15 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
1398 if (value > 1) 1678 if (value > 1)
1399 return -EINVAL; 1679 return -EINVAL;
1400 1680
1401 if (sony_call_snc_handle(KBDBL_HANDLER, 1681 if (sony_call_snc_handle(kbdbl_ctl->handle,
1402 (value << 0x10) | SET_MODE, &result)) 1682 (value << 0x10) | (kbdbl_ctl->base), &result))
1403 return -EIO; 1683 return -EIO;
1404 1684
1405 /* Try to turn the light on/off immediately */ 1685 /* Try to turn the light on/off immediately */
1406 sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE, 1686 sony_call_snc_handle(kbdbl_ctl->handle,
1407 &result); 1687 (value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
1408 1688
1409 kbdbl_handle->mode = value; 1689 kbdbl_ctl->mode = value;
1410 1690
1411 return 0; 1691 return 0;
1412} 1692}
@@ -1421,7 +1701,7 @@ static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
1421 if (count > 31) 1701 if (count > 31)
1422 return -EINVAL; 1702 return -EINVAL;
1423 1703
1424 if (strict_strtoul(buffer, 10, &value)) 1704 if (kstrtoul(buffer, 10, &value))
1425 return -EINVAL; 1705 return -EINVAL;
1426 1706
1427 ret = __sony_nc_kbd_backlight_mode_set(value); 1707 ret = __sony_nc_kbd_backlight_mode_set(value);
@@ -1435,7 +1715,7 @@ static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
1435 struct device_attribute *attr, char *buffer) 1715 struct device_attribute *attr, char *buffer)
1436{ 1716{
1437 ssize_t count = 0; 1717 ssize_t count = 0;
1438 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode); 1718 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);
1439 return count; 1719 return count;
1440} 1720}
1441 1721
@@ -1446,11 +1726,11 @@ static int __sony_nc_kbd_backlight_timeout_set(u8 value)
1446 if (value > 3) 1726 if (value > 3)
1447 return -EINVAL; 1727 return -EINVAL;
1448 1728
1449 if (sony_call_snc_handle(KBDBL_HANDLER, 1729 if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
1450 (value << 0x10) | SET_TIMEOUT, &result)) 1730 (kbdbl_ctl->base + 0x200), &result))
1451 return -EIO; 1731 return -EIO;
1452 1732
1453 kbdbl_handle->timeout = value; 1733 kbdbl_ctl->timeout = value;
1454 1734
1455 return 0; 1735 return 0;
1456} 1736}
@@ -1465,7 +1745,7 @@ static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
1465 if (count > 31) 1745 if (count > 31)
1466 return -EINVAL; 1746 return -EINVAL;
1467 1747
1468 if (strict_strtoul(buffer, 10, &value)) 1748 if (kstrtoul(buffer, 10, &value))
1469 return -EINVAL; 1749 return -EINVAL;
1470 1750
1471 ret = __sony_nc_kbd_backlight_timeout_set(value); 1751 ret = __sony_nc_kbd_backlight_timeout_set(value);
@@ -1479,39 +1759,58 @@ static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
1479 struct device_attribute *attr, char *buffer) 1759 struct device_attribute *attr, char *buffer)
1480{ 1760{
1481 ssize_t count = 0; 1761 ssize_t count = 0;
1482 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout); 1762 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);
1483 return count; 1763 return count;
1484} 1764}
1485 1765
1486static int sony_nc_kbd_backlight_setup(struct platform_device *pd) 1766static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1767 unsigned int handle)
1487{ 1768{
1488 int result; 1769 int result;
1770 int ret = 0;
1489 1771
1490 if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result)) 1772 /* verify the kbd backlight presence, these handles are not used for
1491 return 0; 1773 * keyboard backlight only
1492 if (!(result & 0x02)) 1774 */
1775 ret = sony_call_snc_handle(handle, handle == 0x0137 ? 0x0B00 : 0x0100,
1776 &result);
1777 if (ret)
1778 return ret;
1779
1780 if ((handle == 0x0137 && !(result & 0x02)) ||
1781 !(result & 0x01)) {
1782 dprintk("no backlight keyboard found\n");
1493 return 0; 1783 return 0;
1784 }
1494 1785
1495 kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL); 1786 kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
1496 if (!kbdbl_handle) 1787 if (!kbdbl_ctl)
1497 return -ENOMEM; 1788 return -ENOMEM;
1498 1789
1499 sysfs_attr_init(&kbdbl_handle->mode_attr.attr); 1790 kbdbl_ctl->handle = handle;
1500 kbdbl_handle->mode_attr.attr.name = "kbd_backlight"; 1791 if (handle == 0x0137)
1501 kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR; 1792 kbdbl_ctl->base = 0x0C00;
1502 kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show; 1793 else
1503 kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store; 1794 kbdbl_ctl->base = 0x4000;
1795
1796 sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
1797 kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
1798 kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
1799 kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
1800 kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;
1504 1801
1505 sysfs_attr_init(&kbdbl_handle->timeout_attr.attr); 1802 sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
1506 kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout"; 1803 kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
1507 kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR; 1804 kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
1508 kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show; 1805 kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
1509 kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store; 1806 kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
1510 1807
1511 if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr)) 1808 ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
1809 if (ret)
1512 goto outkzalloc; 1810 goto outkzalloc;
1513 1811
1514 if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr)) 1812 ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1813 if (ret)
1515 goto outmode; 1814 goto outmode;
1516 1815
1517 __sony_nc_kbd_backlight_mode_set(kbd_backlight); 1816 __sony_nc_kbd_backlight_mode_set(kbd_backlight);
@@ -1520,57 +1819,661 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
1520 return 0; 1819 return 0;
1521 1820
1522outmode: 1821outmode:
1523 device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); 1822 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1524outkzalloc: 1823outkzalloc:
1525 kfree(kbdbl_handle); 1824 kfree(kbdbl_ctl);
1526 kbdbl_handle = NULL; 1825 kbdbl_ctl = NULL;
1527 return -1; 1826 return ret;
1528} 1827}
1529 1828
1530static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1829static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1531{ 1830{
1532 if (kbdbl_handle) { 1831 if (kbdbl_ctl) {
1533 int result; 1832 int result;
1534 1833
1535 device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); 1834 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1536 device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); 1835 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1537 1836
1538 /* restore the default hw behaviour */ 1837 /* restore the default hw behaviour */
1539 sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result); 1838 sony_call_snc_handle(kbdbl_ctl->handle,
1540 sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result); 1839 kbdbl_ctl->base | 0x10000, &result);
1840 sony_call_snc_handle(kbdbl_ctl->handle,
1841 kbdbl_ctl->base + 0x200, &result);
1541 1842
1542 kfree(kbdbl_handle); 1843 kfree(kbdbl_ctl);
1844 kbdbl_ctl = NULL;
1543 } 1845 }
1544 return 0;
1545} 1846}
1546 1847
1547static void sony_nc_kbd_backlight_resume(void) 1848static void sony_nc_kbd_backlight_resume(void)
1548{ 1849{
1549 int ignore = 0; 1850 int ignore = 0;
1550 1851
1551 if (!kbdbl_handle) 1852 if (!kbdbl_ctl)
1552 return; 1853 return;
1553 1854
1554 if (kbdbl_handle->mode == 0) 1855 if (kbdbl_ctl->mode == 0)
1555 sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore); 1856 sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
1556
1557 if (kbdbl_handle->timeout != 0)
1558 sony_call_snc_handle(KBDBL_HANDLER,
1559 (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
1560 &ignore); 1857 &ignore);
1858
1859 if (kbdbl_ctl->timeout != 0)
1860 sony_call_snc_handle(kbdbl_ctl->handle,
1861 (kbdbl_ctl->base + 0x200) |
1862 (kbdbl_ctl->timeout << 0x10), &ignore);
1863}
1864
1865struct battery_care_control {
1866 struct device_attribute attrs[2];
1867 unsigned int handle;
1868};
1869static struct battery_care_control *bcare_ctl;
1870
1871static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
1872 struct device_attribute *attr,
1873 const char *buffer, size_t count)
1874{
1875 unsigned int result, cmd;
1876 unsigned long value;
1877
1878 if (count > 31)
1879 return -EINVAL;
1880
1881 if (kstrtoul(buffer, 10, &value))
1882 return -EINVAL;
1883
1884 /* limit values (2 bits):
1885 * 00 - none
1886 * 01 - 80%
1887 * 10 - 50%
1888 * 11 - 100%
1889 *
1890 * bit 0: 0 disable BCL, 1 enable BCL
1891 * bit 1: 1 tell to store the battery limit (see bits 6,7) too
1892 * bits 2,3: reserved
1893 * bits 4,5: store the limit into the EC
1894 * bits 6,7: store the limit into the battery
1895 */
1896
1897 /*
1898 * handle 0x0115 should allow storing on battery too;
1899 * handle 0x0136 same as 0x0115 + health status;
1900 * handle 0x013f, same as 0x0136 but no storing on the battery
1901 *
1902 * Store only inside the EC for now, regardless the handle number
1903 */
1904 if (value == 0)
1905 /* disable limits */
1906 cmd = 0x0;
1907
1908 else if (value <= 50)
1909 cmd = 0x21;
1910
1911 else if (value <= 80)
1912 cmd = 0x11;
1913
1914 else if (value <= 100)
1915 cmd = 0x31;
1916
1917 else
1918 return -EINVAL;
1919
1920 if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100,
1921 &result))
1922 return -EIO;
1923
1924 return count;
1925}
1926
1927static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
1928 struct device_attribute *attr, char *buffer)
1929{
1930 unsigned int result, status;
1931
1932 if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
1933 return -EIO;
1934
1935 status = (result & 0x01) ? ((result & 0x30) >> 0x04) : 0;
1936 switch (status) {
1937 case 1:
1938 status = 80;
1939 break;
1940 case 2:
1941 status = 50;
1942 break;
1943 case 3:
1944 status = 100;
1945 break;
1946 default:
1947 status = 0;
1948 break;
1949 }
1950
1951 return snprintf(buffer, PAGE_SIZE, "%d\n", status);
1952}
1953
1954static ssize_t sony_nc_battery_care_health_show(struct device *dev,
1955 struct device_attribute *attr, char *buffer)
1956{
1957 ssize_t count = 0;
1958 unsigned int health;
1959
1960 if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
1961 return -EIO;
1962
1963 count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);
1964
1965 return count;
1966}
1967
1968static int sony_nc_battery_care_setup(struct platform_device *pd,
1969 unsigned int handle)
1970{
1971 int ret = 0;
1972
1973 bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
1974 if (!bcare_ctl)
1975 return -ENOMEM;
1976
1977 bcare_ctl->handle = handle;
1978
1979 sysfs_attr_init(&bcare_ctl->attrs[0].attr);
1980 bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
1981 bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
1982 bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
1983 bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;
1984
1985 ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
1986 if (ret)
1987 goto outkzalloc;
1988
1989 /* 0x0115 is for models with no health reporting capability */
1990 if (handle == 0x0115)
1991 return 0;
1992
1993 sysfs_attr_init(&bcare_ctl->attrs[1].attr);
1994 bcare_ctl->attrs[1].attr.name = "battery_care_health";
1995 bcare_ctl->attrs[1].attr.mode = S_IRUGO;
1996 bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;
1997
1998 ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
1999 if (ret)
2000 goto outlimiter;
2001
2002 return 0;
2003
2004outlimiter:
2005 device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
2006
2007outkzalloc:
2008 kfree(bcare_ctl);
2009 bcare_ctl = NULL;
2010
2011 return ret;
2012}
2013
2014static void sony_nc_battery_care_cleanup(struct platform_device *pd)
2015{
2016 if (bcare_ctl) {
2017 device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
2018 if (bcare_ctl->handle != 0x0115)
2019 device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);
2020
2021 kfree(bcare_ctl);
2022 bcare_ctl = NULL;
2023 }
2024}
2025
2026struct snc_thermal_ctrl {
2027 unsigned int mode;
2028 unsigned int profiles;
2029 struct device_attribute mode_attr;
2030 struct device_attribute profiles_attr;
2031};
2032static struct snc_thermal_ctrl *th_handle;
2033
2034#define THM_PROFILE_MAX 3
2035static const char * const snc_thermal_profiles[] = {
2036 "balanced",
2037 "silent",
2038 "performance"
2039};
2040
2041static int sony_nc_thermal_mode_set(unsigned short mode)
2042{
2043 unsigned int result;
2044
2045 /* the thermal profile seems to be a two bit bitmask:
2046 * lsb -> silent
2047 * msb -> performance
2048 * no bit set is the normal operation and is always valid
2049 * Some vaio models only have "balanced" and "performance"
2050 */
2051 if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX)
2052 return -EINVAL;
2053
2054 if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result))
2055 return -EIO;
2056
2057 th_handle->mode = mode;
2058
2059 return 0;
2060}
2061
2062static int sony_nc_thermal_mode_get(void)
2063{
2064 unsigned int result;
2065
2066 if (sony_call_snc_handle(0x0122, 0x0100, &result))
2067 return -EIO;
2068
2069 return result & 0xff;
2070}
2071
2072static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
2073 struct device_attribute *attr, char *buffer)
2074{
2075 short cnt;
2076 size_t idx = 0;
2077
2078 for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
2079 if (!cnt || (th_handle->profiles & cnt))
2080 idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
2081 snc_thermal_profiles[cnt]);
2082 }
2083 idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n");
2084
2085 return idx;
2086}
2087
2088static ssize_t sony_nc_thermal_mode_store(struct device *dev,
2089 struct device_attribute *attr,
2090 const char *buffer, size_t count)
2091{
2092 unsigned short cmd;
2093 size_t len = count;
2094
2095 if (count == 0)
2096 return -EINVAL;
2097
2098 /* skip the newline if present */
2099 if (buffer[len - 1] == '\n')
2100 len--;
2101
2102 for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++)
2103 if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0)
2104 break;
2105
2106 if (sony_nc_thermal_mode_set(cmd))
2107 return -EIO;
2108
2109 return count;
2110}
2111
2112static ssize_t sony_nc_thermal_mode_show(struct device *dev,
2113 struct device_attribute *attr, char *buffer)
2114{
2115 ssize_t count = 0;
2116 unsigned int mode = sony_nc_thermal_mode_get();
2117
2118 if (mode < 0)
2119 return mode;
2120
2121 count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]);
2122
2123 return count;
2124}
2125
2126static int sony_nc_thermal_setup(struct platform_device *pd)
2127{
2128 int ret = 0;
2129 th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL);
2130 if (!th_handle)
2131 return -ENOMEM;
2132
2133 ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles);
2134 if (ret) {
2135 pr_warn("couldn't to read the thermal profiles\n");
2136 goto outkzalloc;
2137 }
2138
2139 ret = sony_nc_thermal_mode_get();
2140 if (ret < 0) {
2141 pr_warn("couldn't to read the current thermal profile");
2142 goto outkzalloc;
2143 }
2144 th_handle->mode = ret;
2145
2146 sysfs_attr_init(&th_handle->profiles_attr.attr);
2147 th_handle->profiles_attr.attr.name = "thermal_profiles";
2148 th_handle->profiles_attr.attr.mode = S_IRUGO;
2149 th_handle->profiles_attr.show = sony_nc_thermal_profiles_show;
2150
2151 sysfs_attr_init(&th_handle->mode_attr.attr);
2152 th_handle->mode_attr.attr.name = "thermal_control";
2153 th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
2154 th_handle->mode_attr.show = sony_nc_thermal_mode_show;
2155 th_handle->mode_attr.store = sony_nc_thermal_mode_store;
2156
2157 ret = device_create_file(&pd->dev, &th_handle->profiles_attr);
2158 if (ret)
2159 goto outkzalloc;
2160
2161 ret = device_create_file(&pd->dev, &th_handle->mode_attr);
2162 if (ret)
2163 goto outprofiles;
2164
2165 return 0;
2166
2167outprofiles:
2168 device_remove_file(&pd->dev, &th_handle->profiles_attr);
2169outkzalloc:
2170 kfree(th_handle);
2171 th_handle = NULL;
2172 return ret;
2173}
2174
2175static void sony_nc_thermal_cleanup(struct platform_device *pd)
2176{
2177 if (th_handle) {
2178 device_remove_file(&pd->dev, &th_handle->profiles_attr);
2179 device_remove_file(&pd->dev, &th_handle->mode_attr);
2180 kfree(th_handle);
2181 th_handle = NULL;
2182 }
2183}
2184
2185static void sony_nc_thermal_resume(void)
2186{
2187 unsigned int status = sony_nc_thermal_mode_get();
2188
2189 if (status != th_handle->mode)
2190 sony_nc_thermal_mode_set(th_handle->mode);
2191}
2192
2193/* resume on LID open */
2194struct snc_lid_resume_control {
2195 struct device_attribute attrs[3];
2196 unsigned int status;
2197};
2198static struct snc_lid_resume_control *lid_ctl;
2199
2200static ssize_t sony_nc_lid_resume_store(struct device *dev,
2201 struct device_attribute *attr,
2202 const char *buffer, size_t count)
2203{
2204 unsigned int result, pos;
2205 unsigned long value;
2206 if (count > 31)
2207 return -EINVAL;
2208
2209 if (kstrtoul(buffer, 10, &value) || value > 1)
2210 return -EINVAL;
2211
2212 /* the value we have to write to SNC is a bitmask:
2213 * +--------------+
2214 * | S3 | S4 | S5 |
2215 * +--------------+
2216 * 2 1 0
2217 */
2218 if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
2219 pos = 2;
2220 else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
2221 pos = 1;
2222 else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
2223 pos = 0;
2224 else
2225 return -EINVAL;
2226
2227 if (value)
2228 value = lid_ctl->status | (1 << pos);
2229 else
2230 value = lid_ctl->status & ~(1 << pos);
2231
2232 if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
2233 return -EIO;
2234
2235 lid_ctl->status = value;
2236
2237 return count;
2238}
2239
2240static ssize_t sony_nc_lid_resume_show(struct device *dev,
2241 struct device_attribute *attr, char *buffer)
2242{
2243 unsigned int pos;
2244
2245 if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
2246 pos = 2;
2247 else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
2248 pos = 1;
2249 else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
2250 pos = 0;
2251 else
2252 return -EINVAL;
2253
2254 return snprintf(buffer, PAGE_SIZE, "%d\n",
2255 (lid_ctl->status >> pos) & 0x01);
2256}
2257
2258static int sony_nc_lid_resume_setup(struct platform_device *pd)
2259{
2260 unsigned int result;
2261 int i;
2262
2263 if (sony_call_snc_handle(0x0119, 0x0000, &result))
2264 return -EIO;
2265
2266 lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
2267 if (!lid_ctl)
2268 return -ENOMEM;
2269
2270 lid_ctl->status = result & 0x7;
2271
2272 sysfs_attr_init(&lid_ctl->attrs[0].attr);
2273 lid_ctl->attrs[0].attr.name = "lid_resume_S3";
2274 lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
2275 lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
2276 lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
2277
2278 sysfs_attr_init(&lid_ctl->attrs[1].attr);
2279 lid_ctl->attrs[1].attr.name = "lid_resume_S4";
2280 lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
2281 lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
2282 lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
2283
2284 sysfs_attr_init(&lid_ctl->attrs[2].attr);
2285 lid_ctl->attrs[2].attr.name = "lid_resume_S5";
2286 lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
2287 lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
2288 lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
2289
2290 for (i = 0; i < 3; i++) {
2291 result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
2292 if (result)
2293 goto liderror;
2294 }
2295
2296 return 0;
2297
2298liderror:
2299 for (; i > 0; i--)
2300 device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
2301
2302 kfree(lid_ctl);
2303 lid_ctl = NULL;
2304
2305 return result;
2306}
2307
2308static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
2309{
2310 int i;
2311
2312 if (lid_ctl) {
2313 for (i = 0; i < 3; i++)
2314 device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
2315
2316 kfree(lid_ctl);
2317 lid_ctl = NULL;
2318 }
2319}
2320
2321/* High speed charging function */
2322static struct device_attribute *hsc_handle;
2323
2324static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
2325 struct device_attribute *attr,
2326 const char *buffer, size_t count)
2327{
2328 unsigned int result;
2329 unsigned long value;
2330
2331 if (count > 31)
2332 return -EINVAL;
2333
2334 if (kstrtoul(buffer, 10, &value) || value > 1)
2335 return -EINVAL;
2336
2337 if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
2338 return -EIO;
2339
2340 return count;
2341}
2342
2343static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
2344 struct device_attribute *attr, char *buffer)
2345{
2346 unsigned int result;
2347
2348 if (sony_call_snc_handle(0x0131, 0x0100, &result))
2349 return -EIO;
2350
2351 return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
2352}
2353
2354static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
2355{
2356 unsigned int result;
2357
2358 if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
2359 /* some models advertise the handle but have no implementation
2360 * for it
2361 */
2362 pr_info("No High Speed Charging capability found\n");
2363 return 0;
2364 }
2365
2366 hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
2367 if (!hsc_handle)
2368 return -ENOMEM;
2369
2370 sysfs_attr_init(&hsc_handle->attr);
2371 hsc_handle->attr.name = "battery_highspeed_charging";
2372 hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
2373 hsc_handle->show = sony_nc_highspeed_charging_show;
2374 hsc_handle->store = sony_nc_highspeed_charging_store;
2375
2376 result = device_create_file(&pd->dev, hsc_handle);
2377 if (result) {
2378 kfree(hsc_handle);
2379 hsc_handle = NULL;
2380 return result;
2381 }
2382
2383 return 0;
2384}
2385
2386static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
2387{
2388 if (hsc_handle) {
2389 device_remove_file(&pd->dev, hsc_handle);
2390 kfree(hsc_handle);
2391 hsc_handle = NULL;
2392 }
2393}
2394
2395/* Touchpad enable/disable */
2396struct touchpad_control {
2397 struct device_attribute attr;
2398 int handle;
2399};
2400static struct touchpad_control *tp_ctl;
2401
2402static ssize_t sony_nc_touchpad_store(struct device *dev,
2403 struct device_attribute *attr, const char *buffer, size_t count)
2404{
2405 unsigned int result;
2406 unsigned long value;
2407
2408 if (count > 31)
2409 return -EINVAL;
2410
2411 if (kstrtoul(buffer, 10, &value) || value > 1)
2412 return -EINVAL;
2413
2414 /* sysfs: 0 disabled, 1 enabled
2415 * EC: 0 enabled, 1 disabled
2416 */
2417 if (sony_call_snc_handle(tp_ctl->handle,
2418 (!value << 0x10) | 0x100, &result))
2419 return -EIO;
2420
2421 return count;
2422}
2423
2424static ssize_t sony_nc_touchpad_show(struct device *dev,
2425 struct device_attribute *attr, char *buffer)
2426{
2427 unsigned int result;
2428
2429 if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
2430 return -EINVAL;
2431
2432 return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
2433}
2434
2435static int sony_nc_touchpad_setup(struct platform_device *pd,
2436 unsigned int handle)
2437{
2438 int ret = 0;
2439
2440 tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
2441 if (!tp_ctl)
2442 return -ENOMEM;
2443
2444 tp_ctl->handle = handle;
2445
2446 sysfs_attr_init(&tp_ctl->attr.attr);
2447 tp_ctl->attr.attr.name = "touchpad";
2448 tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
2449 tp_ctl->attr.show = sony_nc_touchpad_show;
2450 tp_ctl->attr.store = sony_nc_touchpad_store;
2451
2452 ret = device_create_file(&pd->dev, &tp_ctl->attr);
2453 if (ret) {
2454 kfree(tp_ctl);
2455 tp_ctl = NULL;
2456 }
2457
2458 return ret;
2459}
2460
2461static void sony_nc_touchpad_cleanup(struct platform_device *pd)
2462{
2463 if (tp_ctl) {
2464 device_remove_file(&pd->dev, &tp_ctl->attr);
2465 kfree(tp_ctl);
2466 tp_ctl = NULL;
2467 }
1561} 2468}
1562 2469
1563static void sony_nc_backlight_ng_read_limits(int handle, 2470static void sony_nc_backlight_ng_read_limits(int handle,
1564 struct sony_backlight_props *props) 2471 struct sony_backlight_props *props)
1565{ 2472{
1566 int offset; 2473 u64 offset;
1567 acpi_status status; 2474 int i;
1568 u8 brlvl, i;
1569 u8 min = 0xff, max = 0x00; 2475 u8 min = 0xff, max = 0x00;
1570 struct acpi_object_list params; 2476 unsigned char buffer[32] = { 0 };
1571 union acpi_object in_obj;
1572 union acpi_object *lvl_enum;
1573 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1574 2477
1575 props->handle = handle; 2478 props->handle = handle;
1576 props->offset = 0; 2479 props->offset = 0;
@@ -1583,50 +2486,31 @@ static void sony_nc_backlight_ng_read_limits(int handle,
1583 /* try to read the boundaries from ACPI tables, if we fail the above 2486 /* try to read the boundaries from ACPI tables, if we fail the above
1584 * defaults should be reasonable 2487 * defaults should be reasonable
1585 */ 2488 */
1586 params.count = 1; 2489 i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
1587 params.pointer = &in_obj; 2490 32);
1588 in_obj.type = ACPI_TYPE_INTEGER; 2491 if (i < 0)
1589 in_obj.integer.value = offset;
1590 status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
1591 &buffer);
1592 if (ACPI_FAILURE(status))
1593 return; 2492 return;
1594 2493
1595 lvl_enum = (union acpi_object *) buffer.pointer;
1596 if (!lvl_enum) {
1597 pr_err("No SN06 return object.");
1598 return;
1599 }
1600 if (lvl_enum->type != ACPI_TYPE_BUFFER) {
1601 pr_err("Invalid SN06 return object 0x%.2x\n",
1602 lvl_enum->type);
1603 goto out_invalid;
1604 }
1605
1606 /* the buffer lists brightness levels available, brightness levels are 2494 /* the buffer lists brightness levels available, brightness levels are
1607 * from 0 to 8 in the array, other values are used by ALS control. 2495 * from position 0 to 8 in the array, other values are used by ALS
2496 * control.
1608 */ 2497 */
1609 for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) { 2498 for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) {
1610 2499
1611 brlvl = *(lvl_enum->buffer.pointer + i); 2500 dprintk("Brightness level: %d\n", buffer[i]);
1612 dprintk("Brightness level: %d\n", brlvl);
1613 2501
1614 if (!brlvl) 2502 if (!buffer[i])
1615 break; 2503 break;
1616 2504
1617 if (brlvl > max) 2505 if (buffer[i] > max)
1618 max = brlvl; 2506 max = buffer[i];
1619 if (brlvl < min) 2507 if (buffer[i] < min)
1620 min = brlvl; 2508 min = buffer[i];
1621 } 2509 }
1622 props->offset = min; 2510 props->offset = min;
1623 props->maxlvl = max; 2511 props->maxlvl = max;
1624 dprintk("Brightness levels: min=%d max=%d\n", props->offset, 2512 dprintk("Brightness levels: min=%d max=%d\n", props->offset,
1625 props->maxlvl); 2513 props->maxlvl);
1626
1627out_invalid:
1628 kfree(buffer.pointer);
1629 return;
1630} 2514}
1631 2515
1632static void sony_nc_backlight_setup(void) 2516static void sony_nc_backlight_setup(void)
@@ -1715,28 +2599,25 @@ static int sony_nc_add(struct acpi_device *device)
1715 2599
1716 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", 2600 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
1717 &handle))) { 2601 &handle))) {
1718 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) 2602 int arg = 1;
2603 if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
1719 dprintk("ECON Method failed\n"); 2604 dprintk("ECON Method failed\n");
1720 } 2605 }
1721 2606
1722 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", 2607 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
1723 &handle))) { 2608 &handle))) {
1724 dprintk("Doing SNC setup\n"); 2609 dprintk("Doing SNC setup\n");
2610 /* retrieve the available handles */
1725 result = sony_nc_handles_setup(sony_pf_device); 2611 result = sony_nc_handles_setup(sony_pf_device);
1726 if (result) 2612 if (!result)
1727 goto outpresent; 2613 sony_nc_function_setup(device, sony_pf_device);
1728 result = sony_nc_kbd_backlight_setup(sony_pf_device);
1729 if (result)
1730 goto outsnc;
1731 sony_nc_function_setup(device);
1732 sony_nc_rfkill_setup(device);
1733 } 2614 }
1734 2615
1735 /* setup input devices and helper fifo */ 2616 /* setup input devices and helper fifo */
1736 result = sony_laptop_setup_input(device); 2617 result = sony_laptop_setup_input(device);
1737 if (result) { 2618 if (result) {
1738 pr_err("Unable to create input devices\n"); 2619 pr_err("Unable to create input devices\n");
1739 goto outkbdbacklight; 2620 goto outsnc;
1740 } 2621 }
1741 2622
1742 if (acpi_video_backlight_support()) { 2623 if (acpi_video_backlight_support()) {
@@ -1794,10 +2675,8 @@ static int sony_nc_add(struct acpi_device *device)
1794 2675
1795 sony_laptop_remove_input(); 2676 sony_laptop_remove_input();
1796 2677
1797 outkbdbacklight:
1798 sony_nc_kbd_backlight_cleanup(sony_pf_device);
1799
1800 outsnc: 2678 outsnc:
2679 sony_nc_function_cleanup(sony_pf_device);
1801 sony_nc_handles_cleanup(sony_pf_device); 2680 sony_nc_handles_cleanup(sony_pf_device);
1802 2681
1803 outpresent: 2682 outpresent:
@@ -1820,11 +2699,10 @@ static int sony_nc_remove(struct acpi_device *device, int type)
1820 device_remove_file(&sony_pf_device->dev, &item->devattr); 2699 device_remove_file(&sony_pf_device->dev, &item->devattr);
1821 } 2700 }
1822 2701
1823 sony_nc_kbd_backlight_cleanup(sony_pf_device); 2702 sony_nc_function_cleanup(sony_pf_device);
1824 sony_nc_handles_cleanup(sony_pf_device); 2703 sony_nc_handles_cleanup(sony_pf_device);
1825 sony_pf_remove(); 2704 sony_pf_remove();
1826 sony_laptop_remove_input(); 2705 sony_laptop_remove_input();
1827 sony_nc_rfkill_cleanup();
1828 dprintk(SONY_NC_DRIVER_NAME " removed.\n"); 2706 dprintk(SONY_NC_DRIVER_NAME " removed.\n");
1829 2707
1830 return 0; 2708 return 0;
@@ -2437,7 +3315,9 @@ static ssize_t sony_pic_wwanpower_store(struct device *dev,
2437 if (count > 31) 3315 if (count > 31)
2438 return -EINVAL; 3316 return -EINVAL;
2439 3317
2440 value = simple_strtoul(buffer, NULL, 10); 3318 if (kstrtoul(buffer, 10, &value))
3319 return -EINVAL;
3320
2441 mutex_lock(&spic_dev.lock); 3321 mutex_lock(&spic_dev.lock);
2442 __sony_pic_set_wwanpower(value); 3322 __sony_pic_set_wwanpower(value);
2443 mutex_unlock(&spic_dev.lock); 3323 mutex_unlock(&spic_dev.lock);
@@ -2474,7 +3354,9 @@ static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
2474 if (count > 31) 3354 if (count > 31)
2475 return -EINVAL; 3355 return -EINVAL;
2476 3356
2477 value = simple_strtoul(buffer, NULL, 10); 3357 if (kstrtoul(buffer, 10, &value))
3358 return -EINVAL;
3359
2478 mutex_lock(&spic_dev.lock); 3360 mutex_lock(&spic_dev.lock);
2479 __sony_pic_set_bluetoothpower(value); 3361 __sony_pic_set_bluetoothpower(value);
2480 mutex_unlock(&spic_dev.lock); 3362 mutex_unlock(&spic_dev.lock);
@@ -2513,7 +3395,9 @@ static ssize_t sony_pic_fanspeed_store(struct device *dev,
2513 if (count > 31) 3395 if (count > 31)
2514 return -EINVAL; 3396 return -EINVAL;
2515 3397
2516 value = simple_strtoul(buffer, NULL, 10); 3398 if (kstrtoul(buffer, 10, &value))
3399 return -EINVAL;
3400
2517 if (sony_pic_set_fanspeed(value)) 3401 if (sony_pic_set_fanspeed(value))
2518 return -EIO; 3402 return -EIO;
2519 3403
@@ -2671,7 +3555,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
2671 ret = -EIO; 3555 ret = -EIO;
2672 break; 3556 break;
2673 } 3557 }
2674 if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) { 3558 if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL,
3559 &value)) {
2675 ret = -EIO; 3560 ret = -EIO;
2676 break; 3561 break;
2677 } 3562 }
@@ -2688,8 +3573,9 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
2688 ret = -EFAULT; 3573 ret = -EFAULT;
2689 break; 3574 break;
2690 } 3575 }
2691 if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", 3576 value = (val8 >> 5) + 1;
2692 (val8 >> 5) + 1, NULL)) { 3577 if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value,
3578 NULL)) {
2693 ret = -EIO; 3579 ret = -EIO;
2694 break; 3580 break;
2695 } 3581 }
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d68c0002f4a2..8b5610d88418 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3402,7 +3402,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3402 /* Do not issue duplicate brightness change events to 3402 /* Do not issue duplicate brightness change events to
3403 * userspace. tpacpi_detect_brightness_capabilities() must have 3403 * userspace. tpacpi_detect_brightness_capabilities() must have
3404 * been called before this point */ 3404 * been called before this point */
3405 if (tp_features.bright_acpimode && acpi_video_backlight_support()) { 3405 if (acpi_video_backlight_support()) {
3406 pr_info("This ThinkPad has standard ACPI backlight " 3406 pr_info("This ThinkPad has standard ACPI backlight "
3407 "brightness control, supported by the ACPI " 3407 "brightness control, supported by the ACPI "
3408 "video driver\n"); 3408 "video driver\n");
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 57787d87d9a4..dab10f6edcd4 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -95,6 +95,7 @@ MODULE_LICENSE("GPL");
95 95
96/* registers */ 96/* registers */
97#define HCI_FAN 0x0004 97#define HCI_FAN 0x0004
98#define HCI_TR_BACKLIGHT 0x0005
98#define HCI_SYSTEM_EVENT 0x0016 99#define HCI_SYSTEM_EVENT 0x0016
99#define HCI_VIDEO_OUT 0x001c 100#define HCI_VIDEO_OUT 0x001c
100#define HCI_HOTKEY_EVENT 0x001e 101#define HCI_HOTKEY_EVENT 0x001e
@@ -134,6 +135,7 @@ struct toshiba_acpi_dev {
134 unsigned int system_event_supported:1; 135 unsigned int system_event_supported:1;
135 unsigned int ntfy_supported:1; 136 unsigned int ntfy_supported:1;
136 unsigned int info_supported:1; 137 unsigned int info_supported:1;
138 unsigned int tr_backlight_supported:1;
137 139
138 struct mutex mutex; 140 struct mutex mutex;
139}; 141};
@@ -478,34 +480,70 @@ static const struct rfkill_ops toshiba_rfk_ops = {
478 .poll = bt_rfkill_poll, 480 .poll = bt_rfkill_poll,
479}; 481};
480 482
483static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
484{
485 u32 hci_result;
486 u32 status;
487
488 hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
489 *enabled = !status;
490 return hci_result == HCI_SUCCESS ? 0 : -EIO;
491}
492
493static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
494{
495 u32 hci_result;
496 u32 value = !enable;
497
498 hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
499 return hci_result == HCI_SUCCESS ? 0 : -EIO;
500}
501
481static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ; 502static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
482 503
483static int get_lcd(struct backlight_device *bd) 504static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
484{ 505{
485 struct toshiba_acpi_dev *dev = bl_get_data(bd);
486 u32 hci_result; 506 u32 hci_result;
487 u32 value; 507 u32 value;
508 int brightness = 0;
509
510 if (dev->tr_backlight_supported) {
511 bool enabled;
512 int ret = get_tr_backlight_status(dev, &enabled);
513 if (ret)
514 return ret;
515 if (enabled)
516 return 0;
517 brightness++;
518 }
488 519
489 hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result); 520 hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
490 if (hci_result == HCI_SUCCESS) 521 if (hci_result == HCI_SUCCESS)
491 return (value >> HCI_LCD_BRIGHTNESS_SHIFT); 522 return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
492 523
493 return -EIO; 524 return -EIO;
494} 525}
495 526
527static int get_lcd_brightness(struct backlight_device *bd)
528{
529 struct toshiba_acpi_dev *dev = bl_get_data(bd);
530 return __get_lcd_brightness(dev);
531}
532
496static int lcd_proc_show(struct seq_file *m, void *v) 533static int lcd_proc_show(struct seq_file *m, void *v)
497{ 534{
498 struct toshiba_acpi_dev *dev = m->private; 535 struct toshiba_acpi_dev *dev = m->private;
499 int value; 536 int value;
537 int levels;
500 538
501 if (!dev->backlight_dev) 539 if (!dev->backlight_dev)
502 return -ENODEV; 540 return -ENODEV;
503 541
504 value = get_lcd(dev->backlight_dev); 542 levels = dev->backlight_dev->props.max_brightness + 1;
543 value = get_lcd_brightness(dev->backlight_dev);
505 if (value >= 0) { 544 if (value >= 0) {
506 seq_printf(m, "brightness: %d\n", value); 545 seq_printf(m, "brightness: %d\n", value);
507 seq_printf(m, "brightness_levels: %d\n", 546 seq_printf(m, "brightness_levels: %d\n", levels);
508 HCI_LCD_BRIGHTNESS_LEVELS);
509 return 0; 547 return 0;
510 } 548 }
511 549
@@ -518,10 +556,19 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
518 return single_open(file, lcd_proc_show, PDE(inode)->data); 556 return single_open(file, lcd_proc_show, PDE(inode)->data);
519} 557}
520 558
521static int set_lcd(struct toshiba_acpi_dev *dev, int value) 559static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
522{ 560{
523 u32 hci_result; 561 u32 hci_result;
524 562
563 if (dev->tr_backlight_supported) {
564 bool enable = !value;
565 int ret = set_tr_backlight_status(dev, enable);
566 if (ret)
567 return ret;
568 if (value)
569 value--;
570 }
571
525 value = value << HCI_LCD_BRIGHTNESS_SHIFT; 572 value = value << HCI_LCD_BRIGHTNESS_SHIFT;
526 hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result); 573 hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
527 return hci_result == HCI_SUCCESS ? 0 : -EIO; 574 return hci_result == HCI_SUCCESS ? 0 : -EIO;
@@ -530,7 +577,7 @@ static int set_lcd(struct toshiba_acpi_dev *dev, int value)
530static int set_lcd_status(struct backlight_device *bd) 577static int set_lcd_status(struct backlight_device *bd)
531{ 578{
532 struct toshiba_acpi_dev *dev = bl_get_data(bd); 579 struct toshiba_acpi_dev *dev = bl_get_data(bd);
533 return set_lcd(dev, bd->props.brightness); 580 return set_lcd_brightness(dev, bd->props.brightness);
534} 581}
535 582
536static ssize_t lcd_proc_write(struct file *file, const char __user *buf, 583static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
@@ -541,6 +588,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
541 size_t len; 588 size_t len;
542 int value; 589 int value;
543 int ret; 590 int ret;
591 int levels = dev->backlight_dev->props.max_brightness + 1;
544 592
545 len = min(count, sizeof(cmd) - 1); 593 len = min(count, sizeof(cmd) - 1);
546 if (copy_from_user(cmd, buf, len)) 594 if (copy_from_user(cmd, buf, len))
@@ -548,8 +596,8 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
548 cmd[len] = '\0'; 596 cmd[len] = '\0';
549 597
550 if (sscanf(cmd, " brightness : %i", &value) == 1 && 598 if (sscanf(cmd, " brightness : %i", &value) == 1 &&
551 value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) { 599 value >= 0 && value < levels) {
552 ret = set_lcd(dev, value); 600 ret = set_lcd_brightness(dev, value);
553 if (ret == 0) 601 if (ret == 0)
554 ret = count; 602 ret = count;
555 } else { 603 } else {
@@ -860,8 +908,9 @@ static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
860} 908}
861 909
862static const struct backlight_ops toshiba_backlight_data = { 910static const struct backlight_ops toshiba_backlight_data = {
863 .get_brightness = get_lcd, 911 .options = BL_CORE_SUSPENDRESUME,
864 .update_status = set_lcd_status, 912 .get_brightness = get_lcd_brightness,
913 .update_status = set_lcd_status,
865}; 914};
866 915
867static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str, 916static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
@@ -1020,6 +1069,56 @@ static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
1020 return error; 1069 return error;
1021} 1070}
1022 1071
1072static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
1073{
1074 struct backlight_properties props;
1075 int brightness;
1076 int ret;
1077 bool enabled;
1078
1079 /*
1080 * Some machines don't support the backlight methods at all, and
1081 * others support it read-only. Either of these is pretty useless,
1082 * so only register the backlight device if the backlight method
1083 * supports both reads and writes.
1084 */
1085 brightness = __get_lcd_brightness(dev);
1086 if (brightness < 0)
1087 return 0;
1088 ret = set_lcd_brightness(dev, brightness);
1089 if (ret) {
1090 pr_debug("Backlight method is read-only, disabling backlight support\n");
1091 return 0;
1092 }
1093
1094 /* Determine whether or not BIOS supports transflective backlight */
1095 ret = get_tr_backlight_status(dev, &enabled);
1096 dev->tr_backlight_supported = !ret;
1097
1098 memset(&props, 0, sizeof(props));
1099 props.type = BACKLIGHT_PLATFORM;
1100 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1101
1102 /* adding an extra level and having 0 change to transflective mode */
1103 if (dev->tr_backlight_supported)
1104 props.max_brightness++;
1105
1106 dev->backlight_dev = backlight_device_register("toshiba",
1107 &dev->acpi_dev->dev,
1108 dev,
1109 &toshiba_backlight_data,
1110 &props);
1111 if (IS_ERR(dev->backlight_dev)) {
1112 ret = PTR_ERR(dev->backlight_dev);
1113 pr_err("Could not register toshiba backlight device\n");
1114 dev->backlight_dev = NULL;
1115 return ret;
1116 }
1117
1118 dev->backlight_dev->props.brightness = brightness;
1119 return 0;
1120}
1121
1023static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type) 1122static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
1024{ 1123{
1025 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); 1124 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
@@ -1078,7 +1177,6 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
1078 u32 dummy; 1177 u32 dummy;
1079 bool bt_present; 1178 bool bt_present;
1080 int ret = 0; 1179 int ret = 0;
1081 struct backlight_properties props;
1082 1180
1083 if (toshiba_acpi) 1181 if (toshiba_acpi)
1084 return -EBUSY; 1182 return -EBUSY;
@@ -1104,22 +1202,9 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
1104 1202
1105 mutex_init(&dev->mutex); 1203 mutex_init(&dev->mutex);
1106 1204
1107 memset(&props, 0, sizeof(props)); 1205 ret = toshiba_acpi_setup_backlight(dev);
1108 props.type = BACKLIGHT_PLATFORM; 1206 if (ret)
1109 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1110 dev->backlight_dev = backlight_device_register("toshiba",
1111 &acpi_dev->dev,
1112 dev,
1113 &toshiba_backlight_data,
1114 &props);
1115 if (IS_ERR(dev->backlight_dev)) {
1116 ret = PTR_ERR(dev->backlight_dev);
1117
1118 pr_err("Could not register toshiba backlight device\n");
1119 dev->backlight_dev = NULL;
1120 goto error; 1207 goto error;
1121 }
1122 dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev);
1123 1208
1124 /* Register rfkill switch for Bluetooth */ 1209 /* Register rfkill switch for Bluetooth */
1125 if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) { 1210 if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index 41781ed8301c..b57ad8641480 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -15,15 +15,26 @@
15 15
16#include <asm/olpc.h> 16#include <asm/olpc.h>
17 17
18static bool card_blocked;
19
18static int rfkill_set_block(void *data, bool blocked) 20static int rfkill_set_block(void *data, bool blocked)
19{ 21{
20 unsigned char cmd; 22 unsigned char cmd;
23 int r;
24
25 if (blocked == card_blocked)
26 return 0;
27
21 if (blocked) 28 if (blocked)
22 cmd = EC_WLAN_ENTER_RESET; 29 cmd = EC_WLAN_ENTER_RESET;
23 else 30 else
24 cmd = EC_WLAN_LEAVE_RESET; 31 cmd = EC_WLAN_LEAVE_RESET;
25 32
26 return olpc_ec_cmd(cmd, NULL, 0, NULL, 0); 33 r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
34 if (r == 0)
35 card_blocked = blocked;
36
37 return r;
27} 38}
28 39
29static const struct rfkill_ops rfkill_ops = { 40static const struct rfkill_ops rfkill_ops = {
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 99dc29f2f2f2..e3a3b4956f08 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,5 +1,5 @@
1menuconfig POWER_SUPPLY 1menuconfig POWER_SUPPLY
2 tristate "Power supply class support" 2 bool "Power supply class support"
3 help 3 help
4 Say Y here to enable power supply class support. This allows 4 Say Y here to enable power supply class support. This allows
5 power supply (batteries, AC, USB) monitoring by userspace 5 power supply (batteries, AC, USB) monitoring by userspace
@@ -77,7 +77,7 @@ config BATTERY_DS2780
77 Say Y here to enable support for batteries with ds2780 chip. 77 Say Y here to enable support for batteries with ds2780 chip.
78 78
79config BATTERY_DS2781 79config BATTERY_DS2781
80 tristate "2781 battery driver" 80 tristate "DS2781 battery driver"
81 depends on HAS_IOMEM 81 depends on HAS_IOMEM
82 select W1 82 select W1
83 select W1_SLAVE_DS2781 83 select W1_SLAVE_DS2781
@@ -181,14 +181,15 @@ config BATTERY_MAX17040
181 to operate with a single lithium cell 181 to operate with a single lithium cell
182 182
183config BATTERY_MAX17042 183config BATTERY_MAX17042
184 tristate "Maxim MAX17042/8997/8966 Fuel Gauge" 184 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
185 depends on I2C 185 depends on I2C
186 help 186 help
187 MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries 187 MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
188 in handheld and portable equipment. The MAX17042 is configured 188 in handheld and portable equipment. The MAX17042 is configured
189 to operate with a single lithium cell. MAX8997 and MAX8966 are 189 to operate with a single lithium cell. MAX8997 and MAX8966 are
 190 multi-function devices that include fuel gauges that are compatible 190 multi-function devices that include fuel gauges that are compatible
191 with MAX17042. 191 with MAX17042. This driver also supports max17047/50 chips which are
192 improved version of max17042.
192 193
193config BATTERY_Z2 194config BATTERY_Z2
194 tristate "Z2 battery driver" 195 tristate "Z2 battery driver"
@@ -291,6 +292,7 @@ config CHARGER_MAX8998
291config CHARGER_SMB347 292config CHARGER_SMB347
292 tristate "Summit Microelectronics SMB347 Battery Charger" 293 tristate "Summit Microelectronics SMB347 Battery Charger"
293 depends on I2C 294 depends on I2C
295 select REGMAP_I2C
294 help 296 help
295 Say Y to include support for Summit Microelectronics SMB347 297 Say Y to include support for Summit Microelectronics SMB347
296 Battery Charger. 298 Battery Charger.
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index d8bb99394ac0..bba3ccac72fe 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -964,10 +964,15 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
964{ 964{
965 int irq, i, ret = 0; 965 int irq, i, ret = 0;
966 u8 val; 966 u8 val;
967 struct abx500_bm_plat_data *plat_data; 967 struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
968 struct ab8500_btemp *di;
969
970 if (!plat_data) {
971 dev_err(&pdev->dev, "No platform data\n");
972 return -EINVAL;
973 }
968 974
969 struct ab8500_btemp *di = 975 di = kzalloc(sizeof(*di), GFP_KERNEL);
970 kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
971 if (!di) 976 if (!di)
972 return -ENOMEM; 977 return -ENOMEM;
973 978
@@ -977,7 +982,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
977 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); 982 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
978 983
979 /* get btemp specific platform data */ 984 /* get btemp specific platform data */
980 plat_data = pdev->dev.platform_data;
981 di->pdata = plat_data->btemp; 985 di->pdata = plat_data->btemp;
982 if (!di->pdata) { 986 if (!di->pdata) {
983 dev_err(di->dev, "no btemp platform data supplied\n"); 987 dev_err(di->dev, "no btemp platform data supplied\n");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index e2b4accbec88..d2303d0b7c75 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -2534,10 +2534,15 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev)
2534static int __devinit ab8500_charger_probe(struct platform_device *pdev) 2534static int __devinit ab8500_charger_probe(struct platform_device *pdev)
2535{ 2535{
2536 int irq, i, charger_status, ret = 0; 2536 int irq, i, charger_status, ret = 0;
2537 struct abx500_bm_plat_data *plat_data; 2537 struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
2538 struct ab8500_charger *di;
2538 2539
2539 struct ab8500_charger *di = 2540 if (!plat_data) {
2540 kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL); 2541 dev_err(&pdev->dev, "No platform data\n");
2542 return -EINVAL;
2543 }
2544
2545 di = kzalloc(sizeof(*di), GFP_KERNEL);
2541 if (!di) 2546 if (!di)
2542 return -ENOMEM; 2547 return -ENOMEM;
2543 2548
@@ -2550,9 +2555,7 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
2550 spin_lock_init(&di->usb_state.usb_lock); 2555 spin_lock_init(&di->usb_state.usb_lock);
2551 2556
2552 /* get charger specific platform data */ 2557 /* get charger specific platform data */
2553 plat_data = pdev->dev.platform_data;
2554 di->pdata = plat_data->charger; 2558 di->pdata = plat_data->charger;
2555
2556 if (!di->pdata) { 2559 if (!di->pdata) {
2557 dev_err(di->dev, "no charger platform data supplied\n"); 2560 dev_err(di->dev, "no charger platform data supplied\n");
2558 ret = -EINVAL; 2561 ret = -EINVAL;
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index c22f2f05657e..bf022255994c 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2446,10 +2446,15 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
2446{ 2446{
2447 int i, irq; 2447 int i, irq;
2448 int ret = 0; 2448 int ret = 0;
2449 struct abx500_bm_plat_data *plat_data; 2449 struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
2450 struct ab8500_fg *di;
2451
2452 if (!plat_data) {
2453 dev_err(&pdev->dev, "No platform data\n");
2454 return -EINVAL;
2455 }
2450 2456
2451 struct ab8500_fg *di = 2457 di = kzalloc(sizeof(*di), GFP_KERNEL);
2452 kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
2453 if (!di) 2458 if (!di)
2454 return -ENOMEM; 2459 return -ENOMEM;
2455 2460
@@ -2461,7 +2466,6 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
2461 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); 2466 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
2462 2467
2463 /* get fg specific platform data */ 2468 /* get fg specific platform data */
2464 plat_data = pdev->dev.platform_data;
2465 di->pdata = plat_data->fg; 2469 di->pdata = plat_data->fg;
2466 if (!di->pdata) { 2470 if (!di->pdata) {
2467 dev_err(di->dev, "no fg platform data supplied\n"); 2471 dev_err(di->dev, "no fg platform data supplied\n");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 9eca9f1ff0ea..86935ec18954 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -23,6 +23,16 @@
23#include <linux/power/charger-manager.h> 23#include <linux/power/charger-manager.h>
24#include <linux/regulator/consumer.h> 24#include <linux/regulator/consumer.h>
25 25
26static const char * const default_event_names[] = {
27 [CM_EVENT_UNKNOWN] = "Unknown",
28 [CM_EVENT_BATT_FULL] = "Battery Full",
29 [CM_EVENT_BATT_IN] = "Battery Inserted",
30 [CM_EVENT_BATT_OUT] = "Battery Pulled Out",
31 [CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
32 [CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
33 [CM_EVENT_OTHERS] = "Other battery events"
34};
35
26/* 36/*
27 * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for 37 * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
28 * delayed works so that we can run delayed works with CM_JIFFIES_SMALL 38 * delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -57,6 +67,12 @@ static bool cm_suspended;
57static bool cm_rtc_set; 67static bool cm_rtc_set;
58static unsigned long cm_suspend_duration_ms; 68static unsigned long cm_suspend_duration_ms;
59 69
70/* About normal (not suspended) monitoring */
71static unsigned long polling_jiffy = ULONG_MAX; /* ULONG_MAX: no polling */
72static unsigned long next_polling; /* Next appointed polling time */
73static struct workqueue_struct *cm_wq; /* init at driver add */
74static struct delayed_work cm_monitor_work; /* init at driver add */
75
60/* Global charger-manager description */ 76/* Global charger-manager description */
61static struct charger_global_desc *g_desc; /* init with setup_charger_manager */ 77static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
62 78
@@ -71,6 +87,11 @@ static bool is_batt_present(struct charger_manager *cm)
71 int i, ret; 87 int i, ret;
72 88
73 switch (cm->desc->battery_present) { 89 switch (cm->desc->battery_present) {
90 case CM_BATTERY_PRESENT:
91 present = true;
92 break;
93 case CM_NO_BATTERY:
94 break;
74 case CM_FUEL_GAUGE: 95 case CM_FUEL_GAUGE:
75 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 96 ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
76 POWER_SUPPLY_PROP_PRESENT, &val); 97 POWER_SUPPLY_PROP_PRESENT, &val);
@@ -279,6 +300,26 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
279} 300}
280 301
281/** 302/**
303 * try_charger_restart - Restart charging.
304 * @cm: the Charger Manager representing the battery.
305 *
306 * Restart charging by turning off and on the charger.
307 */
308static int try_charger_restart(struct charger_manager *cm)
309{
310 int err;
311
312 if (cm->emergency_stop)
313 return -EAGAIN;
314
315 err = try_charger_enable(cm, false);
316 if (err)
317 return err;
318
319 return try_charger_enable(cm, true);
320}
321
322/**
282 * uevent_notify - Let users know something has changed. 323 * uevent_notify - Let users know something has changed.
283 * @cm: the Charger Manager representing the battery. 324 * @cm: the Charger Manager representing the battery.
284 * @event: the event string. 325 * @event: the event string.
@@ -334,6 +375,46 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
334} 375}
335 376
336/** 377/**
378 * fullbatt_vchk - Check voltage drop some times after "FULL" event.
379 * @work: the work_struct appointing the function
380 *
381 * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
382 * charger_desc, Charger Manager checks voltage drop after the battery
383 * "FULL" event. It checks whether the voltage has dropped more than
 384 * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkdrop_ms.
385 */
386static void fullbatt_vchk(struct work_struct *work)
387{
388 struct delayed_work *dwork = to_delayed_work(work);
389 struct charger_manager *cm = container_of(dwork,
390 struct charger_manager, fullbatt_vchk_work);
391 struct charger_desc *desc = cm->desc;
392 int batt_uV, err, diff;
393
394 /* remove the appointment for fullbatt_vchk */
395 cm->fullbatt_vchk_jiffies_at = 0;
396
397 if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
398 return;
399
400 err = get_batt_uV(cm, &batt_uV);
401 if (err) {
402 dev_err(cm->dev, "%s: get_batt_uV error(%d).\n", __func__, err);
403 return;
404 }
405
406 diff = cm->fullbatt_vchk_uV;
407 diff -= batt_uV;
408
409 dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
410
411 if (diff > desc->fullbatt_vchkdrop_uV) {
412 try_charger_restart(cm);
413 uevent_notify(cm, "Recharge");
414 }
415}
416
417/**
337 * _cm_monitor - Monitor the temperature and return true for exceptions. 418 * _cm_monitor - Monitor the temperature and return true for exceptions.
338 * @cm: the Charger Manager representing the battery. 419 * @cm: the Charger Manager representing the battery.
339 * 420 *
@@ -392,6 +473,131 @@ static bool cm_monitor(void)
392 return stop; 473 return stop;
393} 474}
394 475
476/**
477 * _setup_polling - Setup the next instance of polling.
478 * @work: work_struct of the function _setup_polling.
479 */
480static void _setup_polling(struct work_struct *work)
481{
482 unsigned long min = ULONG_MAX;
483 struct charger_manager *cm;
484 bool keep_polling = false;
485 unsigned long _next_polling;
486
487 mutex_lock(&cm_list_mtx);
488
489 list_for_each_entry(cm, &cm_list, entry) {
490 if (is_polling_required(cm) && cm->desc->polling_interval_ms) {
491 keep_polling = true;
492
493 if (min > cm->desc->polling_interval_ms)
494 min = cm->desc->polling_interval_ms;
495 }
496 }
497
498 polling_jiffy = msecs_to_jiffies(min);
499 if (polling_jiffy <= CM_JIFFIES_SMALL)
500 polling_jiffy = CM_JIFFIES_SMALL + 1;
501
502 if (!keep_polling)
503 polling_jiffy = ULONG_MAX;
504 if (polling_jiffy == ULONG_MAX)
505 goto out;
506
507 WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
508 ". try it later. %s\n", __func__);
509
510 _next_polling = jiffies + polling_jiffy;
511
512 if (!delayed_work_pending(&cm_monitor_work) ||
513 (delayed_work_pending(&cm_monitor_work) &&
514 time_after(next_polling, _next_polling))) {
515 cancel_delayed_work_sync(&cm_monitor_work);
516 next_polling = jiffies + polling_jiffy;
517 queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
518 }
519
520out:
521 mutex_unlock(&cm_list_mtx);
522}
523static DECLARE_WORK(setup_polling, _setup_polling);
524
525/**
526 * cm_monitor_poller - The Monitor / Poller.
527 * @work: work_struct of the function cm_monitor_poller
528 *
529 * During non-suspended state, cm_monitor_poller is used to poll and monitor
530 * the batteries.
531 */
532static void cm_monitor_poller(struct work_struct *work)
533{
534 cm_monitor();
535 schedule_work(&setup_polling);
536}
537
538/**
539 * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
540 * @cm: the Charger Manager representing the battery.
541 */
542static void fullbatt_handler(struct charger_manager *cm)
543{
544 struct charger_desc *desc = cm->desc;
545
546 if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
547 goto out;
548
549 if (cm_suspended)
550 device_set_wakeup_capable(cm->dev, true);
551
552 if (delayed_work_pending(&cm->fullbatt_vchk_work))
553 cancel_delayed_work(&cm->fullbatt_vchk_work);
554 queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
555 msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
556 cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
557 desc->fullbatt_vchkdrop_ms);
558
559 if (cm->fullbatt_vchk_jiffies_at == 0)
560 cm->fullbatt_vchk_jiffies_at = 1;
561
562out:
563 dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged.\n");
564 uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
565}
566
567/**
568 * battout_handler - Event handler for CM_EVENT_BATT_OUT
569 * @cm: the Charger Manager representing the battery.
570 */
571static void battout_handler(struct charger_manager *cm)
572{
573 if (cm_suspended)
574 device_set_wakeup_capable(cm->dev, true);
575
576 if (!is_batt_present(cm)) {
577 dev_emerg(cm->dev, "Battery Pulled Out!\n");
578 uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
579 } else {
580 uevent_notify(cm, "Battery Reinserted?");
581 }
582}
583
584/**
 585 * misc_event_handler - Handler for other events
586 * @cm: the Charger Manager representing the battery.
 587 * @type: the type of event that occurred.
588 */
589static void misc_event_handler(struct charger_manager *cm,
590 enum cm_event_types type)
591{
592 if (cm_suspended)
593 device_set_wakeup_capable(cm->dev, true);
594
595 if (!delayed_work_pending(&cm_monitor_work) &&
596 is_polling_required(cm) && cm->desc->polling_interval_ms)
597 schedule_work(&setup_polling);
598 uevent_notify(cm, default_event_names[type]);
599}
600
395static int charger_get_property(struct power_supply *psy, 601static int charger_get_property(struct power_supply *psy,
396 enum power_supply_property psp, 602 enum power_supply_property psp,
397 union power_supply_propval *val) 603 union power_supply_propval *val)
@@ -613,6 +819,21 @@ static bool cm_setup_timer(void)
613 mutex_lock(&cm_list_mtx); 819 mutex_lock(&cm_list_mtx);
614 820
615 list_for_each_entry(cm, &cm_list, entry) { 821 list_for_each_entry(cm, &cm_list, entry) {
822 unsigned int fbchk_ms = 0;
823
824 /* fullbatt_vchk is required. setup timer for that */
825 if (cm->fullbatt_vchk_jiffies_at) {
826 fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
827 - jiffies);
828 if (time_is_before_eq_jiffies(
829 cm->fullbatt_vchk_jiffies_at) ||
830 msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
831 fullbatt_vchk(&cm->fullbatt_vchk_work.work);
832 fbchk_ms = 0;
833 }
834 }
835 CM_MIN_VALID(wakeup_ms, fbchk_ms);
836
616 /* Skip if polling is not required for this CM */ 837 /* Skip if polling is not required for this CM */
617 if (!is_polling_required(cm) && !cm->emergency_stop) 838 if (!is_polling_required(cm) && !cm->emergency_stop)
618 continue; 839 continue;
@@ -672,6 +893,23 @@ static bool cm_setup_timer(void)
672 return false; 893 return false;
673} 894}
674 895
896static void _cm_fbchk_in_suspend(struct charger_manager *cm)
897{
898 unsigned long jiffy_now = jiffies;
899
900 if (!cm->fullbatt_vchk_jiffies_at)
901 return;
902
903 if (g_desc && g_desc->assume_timer_stops_in_suspend)
904 jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
905
906 /* Execute now if it's going to be executed not too long after */
907 jiffy_now += CM_JIFFIES_SMALL;
908
909 if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
910 fullbatt_vchk(&cm->fullbatt_vchk_work.work);
911}
912
675/** 913/**
676 * cm_suspend_again - Determine whether suspend again or not 914 * cm_suspend_again - Determine whether suspend again or not
677 * 915 *
@@ -693,6 +931,8 @@ bool cm_suspend_again(void)
693 ret = true; 931 ret = true;
694 mutex_lock(&cm_list_mtx); 932 mutex_lock(&cm_list_mtx);
695 list_for_each_entry(cm, &cm_list, entry) { 933 list_for_each_entry(cm, &cm_list, entry) {
934 _cm_fbchk_in_suspend(cm);
935
696 if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) || 936 if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
697 cm->status_save_batt != is_batt_present(cm)) { 937 cm->status_save_batt != is_batt_present(cm)) {
698 ret = false; 938 ret = false;
@@ -796,6 +1036,21 @@ static int charger_manager_probe(struct platform_device *pdev)
796 memcpy(cm->desc, desc, sizeof(struct charger_desc)); 1036 memcpy(cm->desc, desc, sizeof(struct charger_desc));
797 cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */ 1037 cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
798 1038
1039 /*
1040 * The following two do not need to be errors.
1041 * Users may intentionally ignore those two features.
1042 */
1043 if (desc->fullbatt_uV == 0) {
1044 dev_info(&pdev->dev, "Ignoring full-battery voltage threshold"
1045 " as it is not supplied.");
1046 }
1047 if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
1048 dev_info(&pdev->dev, "Disabling full-battery voltage drop "
1049 "checking mechanism as it is not supplied.");
1050 desc->fullbatt_vchkdrop_ms = 0;
1051 desc->fullbatt_vchkdrop_uV = 0;
1052 }
1053
799 if (!desc->charger_regulators || desc->num_charger_regulators < 1) { 1054 if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
800 ret = -EINVAL; 1055 ret = -EINVAL;
801 dev_err(&pdev->dev, "charger_regulators undefined.\n"); 1056 dev_err(&pdev->dev, "charger_regulators undefined.\n");
@@ -903,6 +1158,8 @@ static int charger_manager_probe(struct platform_device *pdev)
903 cm->charger_psy.num_properties++; 1158 cm->charger_psy.num_properties++;
904 } 1159 }
905 1160
1161 INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
1162
906 ret = power_supply_register(NULL, &cm->charger_psy); 1163 ret = power_supply_register(NULL, &cm->charger_psy);
907 if (ret) { 1164 if (ret) {
908 dev_err(&pdev->dev, "Cannot register charger-manager with" 1165 dev_err(&pdev->dev, "Cannot register charger-manager with"
@@ -928,6 +1185,15 @@ static int charger_manager_probe(struct platform_device *pdev)
928 list_add(&cm->entry, &cm_list); 1185 list_add(&cm->entry, &cm_list);
929 mutex_unlock(&cm_list_mtx); 1186 mutex_unlock(&cm_list_mtx);
930 1187
1188 /*
1189 * Charger-manager is capable of waking up the systme from sleep
1190 * when event is happend through cm_notify_event()
1191 */
1192 device_init_wakeup(&pdev->dev, true);
1193 device_set_wakeup_capable(&pdev->dev, false);
1194
1195 schedule_work(&setup_polling);
1196
931 return 0; 1197 return 0;
932 1198
933err_chg_enable: 1199err_chg_enable:
@@ -958,9 +1224,17 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
958 list_del(&cm->entry); 1224 list_del(&cm->entry);
959 mutex_unlock(&cm_list_mtx); 1225 mutex_unlock(&cm_list_mtx);
960 1226
1227 if (work_pending(&setup_polling))
1228 cancel_work_sync(&setup_polling);
1229 if (delayed_work_pending(&cm_monitor_work))
1230 cancel_delayed_work_sync(&cm_monitor_work);
1231
961 regulator_bulk_free(desc->num_charger_regulators, 1232 regulator_bulk_free(desc->num_charger_regulators,
962 desc->charger_regulators); 1233 desc->charger_regulators);
963 power_supply_unregister(&cm->charger_psy); 1234 power_supply_unregister(&cm->charger_psy);
1235
1236 try_charger_enable(cm, false);
1237
964 kfree(cm->charger_psy.properties); 1238 kfree(cm->charger_psy.properties);
965 kfree(cm->charger_stat); 1239 kfree(cm->charger_stat);
966 kfree(cm->desc); 1240 kfree(cm->desc);
@@ -975,6 +1249,18 @@ static const struct platform_device_id charger_manager_id[] = {
975}; 1249};
976MODULE_DEVICE_TABLE(platform, charger_manager_id); 1250MODULE_DEVICE_TABLE(platform, charger_manager_id);
977 1251
1252static int cm_suspend_noirq(struct device *dev)
1253{
1254 int ret = 0;
1255
1256 if (device_may_wakeup(dev)) {
1257 device_set_wakeup_capable(dev, false);
1258 ret = -EAGAIN;
1259 }
1260
1261 return ret;
1262}
1263
978static int cm_suspend_prepare(struct device *dev) 1264static int cm_suspend_prepare(struct device *dev)
979{ 1265{
980 struct charger_manager *cm = dev_get_drvdata(dev); 1266 struct charger_manager *cm = dev_get_drvdata(dev);
@@ -1000,6 +1286,8 @@ static int cm_suspend_prepare(struct device *dev)
1000 cm_suspended = true; 1286 cm_suspended = true;
1001 } 1287 }
1002 1288
1289 if (delayed_work_pending(&cm->fullbatt_vchk_work))
1290 cancel_delayed_work(&cm->fullbatt_vchk_work);
1003 cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm); 1291 cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
1004 cm->status_save_batt = is_batt_present(cm); 1292 cm->status_save_batt = is_batt_present(cm);
1005 1293
@@ -1027,11 +1315,40 @@ static void cm_suspend_complete(struct device *dev)
1027 cm_rtc_set = false; 1315 cm_rtc_set = false;
1028 } 1316 }
1029 1317
1318 /* Re-enqueue delayed work (fullbatt_vchk_work) */
1319 if (cm->fullbatt_vchk_jiffies_at) {
1320 unsigned long delay = 0;
1321 unsigned long now = jiffies + CM_JIFFIES_SMALL;
1322
1323 if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
1324 delay = (unsigned long)((long)now
1325 - (long)(cm->fullbatt_vchk_jiffies_at));
1326 delay = jiffies_to_msecs(delay);
1327 } else {
1328 delay = 0;
1329 }
1330
1331 /*
1332 * Account for cm_suspend_duration_ms if
1333 * assume_timer_stops_in_suspend is active
1334 */
1335 if (g_desc && g_desc->assume_timer_stops_in_suspend) {
1336 if (delay > cm_suspend_duration_ms)
1337 delay -= cm_suspend_duration_ms;
1338 else
1339 delay = 0;
1340 }
1341
1342 queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
1343 msecs_to_jiffies(delay));
1344 }
1345 device_set_wakeup_capable(cm->dev, false);
1030 uevent_notify(cm, NULL); 1346 uevent_notify(cm, NULL);
1031} 1347}
1032 1348
1033static const struct dev_pm_ops charger_manager_pm = { 1349static const struct dev_pm_ops charger_manager_pm = {
1034 .prepare = cm_suspend_prepare, 1350 .prepare = cm_suspend_prepare,
1351 .suspend_noirq = cm_suspend_noirq,
1035 .complete = cm_suspend_complete, 1352 .complete = cm_suspend_complete,
1036}; 1353};
1037 1354
@@ -1048,16 +1365,91 @@ static struct platform_driver charger_manager_driver = {
1048 1365
1049static int __init charger_manager_init(void) 1366static int __init charger_manager_init(void)
1050{ 1367{
1368 cm_wq = create_freezable_workqueue("charger_manager");
1369 INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
1370
1051 return platform_driver_register(&charger_manager_driver); 1371 return platform_driver_register(&charger_manager_driver);
1052} 1372}
1053late_initcall(charger_manager_init); 1373late_initcall(charger_manager_init);
1054 1374
1055static void __exit charger_manager_cleanup(void) 1375static void __exit charger_manager_cleanup(void)
1056{ 1376{
1377 destroy_workqueue(cm_wq);
1378 cm_wq = NULL;
1379
1057 platform_driver_unregister(&charger_manager_driver); 1380 platform_driver_unregister(&charger_manager_driver);
1058} 1381}
1059module_exit(charger_manager_cleanup); 1382module_exit(charger_manager_cleanup);
1060 1383
1384/**
1385 * find_power_supply - find the associated power_supply of charger
1386 * @cm: the Charger Manager representing the battery
1387 * @psy: pointer to instance of charger's power_supply
1388 */
1389static bool find_power_supply(struct charger_manager *cm,
1390 struct power_supply *psy)
1391{
1392 int i;
1393 bool found = false;
1394
1395 for (i = 0; cm->charger_stat[i]; i++) {
1396 if (psy == cm->charger_stat[i]) {
1397 found = true;
1398 break;
1399 }
1400 }
1401
1402 return found;
1403}
1404
1405/**
1406 * cm_notify_event - charger driver notify Charger Manager of charger event
1407 * @psy: pointer to instance of charger's power_supply
1408 * @type: type of charger event
1409 * @msg: optional message passed to uevent_notify fuction
1410 */
1411void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
1412 char *msg)
1413{
1414 struct charger_manager *cm;
1415 bool found_power_supply = false;
1416
1417 if (psy == NULL)
1418 return;
1419
1420 mutex_lock(&cm_list_mtx);
1421 list_for_each_entry(cm, &cm_list, entry) {
1422 found_power_supply = find_power_supply(cm, psy);
1423 if (found_power_supply)
1424 break;
1425 }
1426 mutex_unlock(&cm_list_mtx);
1427
1428 if (!found_power_supply)
1429 return;
1430
1431 switch (type) {
1432 case CM_EVENT_BATT_FULL:
1433 fullbatt_handler(cm);
1434 break;
1435 case CM_EVENT_BATT_OUT:
1436 battout_handler(cm);
1437 break;
1438 case CM_EVENT_BATT_IN:
1439 case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
1440 misc_event_handler(cm, type);
1441 break;
1442 case CM_EVENT_UNKNOWN:
1443 case CM_EVENT_OTHERS:
1444 uevent_notify(cm, msg ? msg : default_event_names[type]);
1445 break;
1446 default:
1447 dev_err(cm->dev, "%s type not specified.\n", __func__);
1448 break;
1449 }
1450}
1451EXPORT_SYMBOL_GPL(cm_notify_event);
1452
1061MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 1453MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1062MODULE_DESCRIPTION("Charger Manager"); 1454MODULE_DESCRIPTION("Charger Manager");
1063MODULE_LICENSE("GPL"); 1455MODULE_LICENSE("GPL");
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index ca0d653d0a7a..975684a40f15 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -643,9 +643,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
643 struct power_supply *psy = to_power_supply(dev); 643 struct power_supply *psy = to_power_supply(dev);
644 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); 644 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
645 645
646 count = min_t(loff_t, count, 646 count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
647 DS2781_EEPROM_BLOCK1_END -
648 DS2781_EEPROM_BLOCK1_START + 1 - off);
649 647
650 return ds2781_read_block(dev_info, buf, 648 return ds2781_read_block(dev_info, buf,
651 DS2781_EEPROM_BLOCK1_START + off, count); 649 DS2781_EEPROM_BLOCK1_START + off, count);
@@ -661,9 +659,7 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
661 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); 659 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
662 int ret; 660 int ret;
663 661
664 count = min_t(loff_t, count, 662 count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
665 DS2781_EEPROM_BLOCK1_END -
666 DS2781_EEPROM_BLOCK1_START + 1 - off);
667 663
668 ret = ds2781_write(dev_info, buf, 664 ret = ds2781_write(dev_info, buf,
669 DS2781_EEPROM_BLOCK1_START + off, count); 665 DS2781_EEPROM_BLOCK1_START + off, count);
@@ -682,7 +678,7 @@ static struct bin_attribute ds2781_param_eeprom_bin_attr = {
682 .name = "param_eeprom", 678 .name = "param_eeprom",
683 .mode = S_IRUGO | S_IWUSR, 679 .mode = S_IRUGO | S_IWUSR,
684 }, 680 },
685 .size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1, 681 .size = DS2781_PARAM_EEPROM_SIZE,
686 .read = ds2781_read_param_eeprom_bin, 682 .read = ds2781_read_param_eeprom_bin,
687 .write = ds2781_write_param_eeprom_bin, 683 .write = ds2781_write_param_eeprom_bin,
688}; 684};
@@ -696,9 +692,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
696 struct power_supply *psy = to_power_supply(dev); 692 struct power_supply *psy = to_power_supply(dev);
697 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); 693 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
698 694
699 count = min_t(loff_t, count, 695 count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
700 DS2781_EEPROM_BLOCK0_END -
701 DS2781_EEPROM_BLOCK0_START + 1 - off);
702 696
703 return ds2781_read_block(dev_info, buf, 697 return ds2781_read_block(dev_info, buf,
704 DS2781_EEPROM_BLOCK0_START + off, count); 698 DS2781_EEPROM_BLOCK0_START + off, count);
@@ -715,9 +709,7 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
715 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); 709 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
716 int ret; 710 int ret;
717 711
718 count = min_t(loff_t, count, 712 count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
719 DS2781_EEPROM_BLOCK0_END -
720 DS2781_EEPROM_BLOCK0_START + 1 - off);
721 713
722 ret = ds2781_write(dev_info, buf, 714 ret = ds2781_write(dev_info, buf,
723 DS2781_EEPROM_BLOCK0_START + off, count); 715 DS2781_EEPROM_BLOCK0_START + off, count);
@@ -736,7 +728,7 @@ static struct bin_attribute ds2781_user_eeprom_bin_attr = {
736 .name = "user_eeprom", 728 .name = "user_eeprom",
737 .mode = S_IRUGO | S_IWUSR, 729 .mode = S_IRUGO | S_IWUSR,
738 }, 730 },
739 .size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1, 731 .size = DS2781_USER_EEPROM_SIZE,
740 .read = ds2781_read_user_eeprom_bin, 732 .read = ds2781_read_user_eeprom_bin,
741 .write = ds2781_write_user_eeprom_bin, 733 .write = ds2781_write_user_eeprom_bin,
742}; 734};
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 39eb50f35f09..e5ccd2979773 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -474,13 +474,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
474fail2: 474fail2:
475 power_supply_unregister(&isp->psy); 475 power_supply_unregister(&isp->psy);
476fail1: 476fail1:
477 isp1704_charger_set_power(isp, 0);
477 usb_put_transceiver(isp->phy); 478 usb_put_transceiver(isp->phy);
478fail0: 479fail0:
479 kfree(isp); 480 kfree(isp);
480 481
481 dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret); 482 dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
482 483
483 isp1704_charger_set_power(isp, 0);
484 return ret; 484 return ret;
485} 485}
486 486
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 04620c2cb388..140788b309f8 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -28,6 +28,7 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/pm.h>
31#include <linux/mod_devicetable.h> 32#include <linux/mod_devicetable.h>
32#include <linux/power_supply.h> 33#include <linux/power_supply.h>
33#include <linux/power/max17042_battery.h> 34#include <linux/power/max17042_battery.h>
@@ -61,9 +62,13 @@
61#define dP_ACC_100 0x1900 62#define dP_ACC_100 0x1900
62#define dP_ACC_200 0x3200 63#define dP_ACC_200 0x3200
63 64
65#define MAX17042_IC_VERSION 0x0092
66#define MAX17047_IC_VERSION 0x00AC /* same for max17050 */
67
64struct max17042_chip { 68struct max17042_chip {
65 struct i2c_client *client; 69 struct i2c_client *client;
66 struct power_supply battery; 70 struct power_supply battery;
71 enum max170xx_chip_type chip_type;
67 struct max17042_platform_data *pdata; 72 struct max17042_platform_data *pdata;
68 struct work_struct work; 73 struct work_struct work;
69 int init_complete; 74 int init_complete;
@@ -105,6 +110,7 @@ static enum power_supply_property max17042_battery_props[] = {
105 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 110 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
106 POWER_SUPPLY_PROP_VOLTAGE_NOW, 111 POWER_SUPPLY_PROP_VOLTAGE_NOW,
107 POWER_SUPPLY_PROP_VOLTAGE_AVG, 112 POWER_SUPPLY_PROP_VOLTAGE_AVG,
113 POWER_SUPPLY_PROP_VOLTAGE_OCV,
108 POWER_SUPPLY_PROP_CAPACITY, 114 POWER_SUPPLY_PROP_CAPACITY,
109 POWER_SUPPLY_PROP_CHARGE_FULL, 115 POWER_SUPPLY_PROP_CHARGE_FULL,
110 POWER_SUPPLY_PROP_TEMP, 116 POWER_SUPPLY_PROP_TEMP,
@@ -150,7 +156,10 @@ static int max17042_get_property(struct power_supply *psy,
150 val->intval *= 20000; /* Units of LSB = 20mV */ 156 val->intval *= 20000; /* Units of LSB = 20mV */
151 break; 157 break;
152 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: 158 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
153 ret = max17042_read_reg(chip->client, MAX17042_V_empty); 159 if (chip->chip_type == MAX17042)
160 ret = max17042_read_reg(chip->client, MAX17042_V_empty);
161 else
162 ret = max17042_read_reg(chip->client, MAX17047_V_empty);
154 if (ret < 0) 163 if (ret < 0)
155 return ret; 164 return ret;
156 165
@@ -171,6 +180,13 @@ static int max17042_get_property(struct power_supply *psy,
171 180
172 val->intval = ret * 625 / 8; 181 val->intval = ret * 625 / 8;
173 break; 182 break;
183 case POWER_SUPPLY_PROP_VOLTAGE_OCV:
184 ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
185 if (ret < 0)
186 return ret;
187
188 val->intval = ret * 625 / 8;
189 break;
174 case POWER_SUPPLY_PROP_CAPACITY: 190 case POWER_SUPPLY_PROP_CAPACITY:
175 ret = max17042_read_reg(chip->client, MAX17042_RepSOC); 191 ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
176 if (ret < 0) 192 if (ret < 0)
@@ -325,11 +341,10 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
325static int max17042_init_model(struct max17042_chip *chip) 341static int max17042_init_model(struct max17042_chip *chip)
326{ 342{
327 int ret; 343 int ret;
328 int table_size = 344 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
329 sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
330 u16 *temp_data; 345 u16 *temp_data;
331 346
332 temp_data = kzalloc(table_size, GFP_KERNEL); 347 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
333 if (!temp_data) 348 if (!temp_data)
334 return -ENOMEM; 349 return -ENOMEM;
335 350
@@ -354,12 +369,11 @@ static int max17042_init_model(struct max17042_chip *chip)
354static int max17042_verify_model_lock(struct max17042_chip *chip) 369static int max17042_verify_model_lock(struct max17042_chip *chip)
355{ 370{
356 int i; 371 int i;
357 int table_size = 372 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
358 sizeof(chip->pdata->config_data->cell_char_tbl);
359 u16 *temp_data; 373 u16 *temp_data;
360 int ret = 0; 374 int ret = 0;
361 375
362 temp_data = kzalloc(table_size, GFP_KERNEL); 376 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
363 if (!temp_data) 377 if (!temp_data)
364 return -ENOMEM; 378 return -ENOMEM;
365 379
@@ -382,6 +396,9 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
382 max17042_write_reg(chip->client, MAX17042_FilterCFG, 396 max17042_write_reg(chip->client, MAX17042_FilterCFG,
383 config->filter_cfg); 397 config->filter_cfg);
384 max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg); 398 max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
399 if (chip->chip_type == MAX17047)
400 max17042_write_reg(chip->client, MAX17047_FullSOCThr,
401 config->full_soc_thresh);
385} 402}
386 403
387static void max17042_write_custom_regs(struct max17042_chip *chip) 404static void max17042_write_custom_regs(struct max17042_chip *chip)
@@ -392,12 +409,23 @@ static void max17042_write_custom_regs(struct max17042_chip *chip)
392 config->rcomp0); 409 config->rcomp0);
393 max17042_write_verify_reg(chip->client, MAX17042_TempCo, 410 max17042_write_verify_reg(chip->client, MAX17042_TempCo,
394 config->tcompc0); 411 config->tcompc0);
395 max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
396 config->empty_tempco);
397 max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
398 config->kempty0);
399 max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm, 412 max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
400 config->ichgt_term); 413 config->ichgt_term);
414 if (chip->chip_type == MAX17042) {
415 max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
416 config->empty_tempco);
417 max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
418 config->kempty0);
419 } else {
420 max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
421 config->qrtbl00);
422 max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
423 config->qrtbl10);
424 max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
425 config->qrtbl20);
426 max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
427 config->qrtbl30);
428 }
401} 429}
402 430
403static void max17042_update_capacity_regs(struct max17042_chip *chip) 431static void max17042_update_capacity_regs(struct max17042_chip *chip)
@@ -453,6 +481,8 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
453 config->design_cap); 481 config->design_cap);
454 max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom, 482 max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
455 config->fullcapnom); 483 config->fullcapnom);
484 /* Update SOC register with new SOC */
485 max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
456} 486}
457 487
458/* 488/*
@@ -489,20 +519,28 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
489 519
490 max17042_override_por(client, MAX17042_FullCAP, config->fullcap); 520 max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
491 max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom); 521 max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
492 max17042_override_por(client, MAX17042_SOC_empty, config->socempty); 522 if (chip->chip_type == MAX17042)
523 max17042_override_por(client, MAX17042_SOC_empty,
524 config->socempty);
493 max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty); 525 max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
494 max17042_override_por(client, MAX17042_dQacc, config->dqacc); 526 max17042_override_por(client, MAX17042_dQacc, config->dqacc);
495 max17042_override_por(client, MAX17042_dPacc, config->dpacc); 527 max17042_override_por(client, MAX17042_dPacc, config->dpacc);
496 528
497 max17042_override_por(client, MAX17042_V_empty, config->vempty); 529 if (chip->chip_type == MAX17042)
530 max17042_override_por(client, MAX17042_V_empty, config->vempty);
531 else
532 max17042_override_por(client, MAX17047_V_empty, config->vempty);
498 max17042_override_por(client, MAX17042_TempNom, config->temp_nom); 533 max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
499 max17042_override_por(client, MAX17042_TempLim, config->temp_lim); 534 max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
500 max17042_override_por(client, MAX17042_FCTC, config->fctc); 535 max17042_override_por(client, MAX17042_FCTC, config->fctc);
501 max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0); 536 max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
502 max17042_override_por(client, MAX17042_TempCo, config->tcompc0); 537 max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
503 max17042_override_por(client, MAX17042_EmptyTempCo, 538 if (chip->chip_type) {
504 config->empty_tempco); 539 max17042_override_por(client, MAX17042_EmptyTempCo,
505 max17042_override_por(client, MAX17042_K_empty0, config->kempty0); 540 config->empty_tempco);
541 max17042_override_por(client, MAX17042_K_empty0,
542 config->kempty0);
543 }
506} 544}
507 545
508static int max17042_init_chip(struct max17042_chip *chip) 546static int max17042_init_chip(struct max17042_chip *chip)
@@ -659,7 +697,19 @@ static int __devinit max17042_probe(struct i2c_client *client,
659 697
660 i2c_set_clientdata(client, chip); 698 i2c_set_clientdata(client, chip);
661 699
662 chip->battery.name = "max17042_battery"; 700 ret = max17042_read_reg(chip->client, MAX17042_DevName);
701 if (ret == MAX17042_IC_VERSION) {
702 dev_dbg(&client->dev, "chip type max17042 detected\n");
703 chip->chip_type = MAX17042;
704 } else if (ret == MAX17047_IC_VERSION) {
705 dev_dbg(&client->dev, "chip type max17047/50 detected\n");
706 chip->chip_type = MAX17047;
707 } else {
708 dev_err(&client->dev, "device version mismatch: %x\n", ret);
709 return -EIO;
710 }
711
712 chip->battery.name = "max170xx_battery";
663 chip->battery.type = POWER_SUPPLY_TYPE_BATTERY; 713 chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
664 chip->battery.get_property = max17042_get_property; 714 chip->battery.get_property = max17042_get_property;
665 chip->battery.properties = max17042_battery_props; 715 chip->battery.properties = max17042_battery_props;
@@ -683,6 +733,12 @@ static int __devinit max17042_probe(struct i2c_client *client,
683 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007); 733 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
684 } 734 }
685 735
736 ret = power_supply_register(&client->dev, &chip->battery);
737 if (ret) {
738 dev_err(&client->dev, "failed: power supply register\n");
739 return ret;
740 }
741
686 if (client->irq) { 742 if (client->irq) {
687 ret = request_threaded_irq(client->irq, NULL, 743 ret = request_threaded_irq(client->irq, NULL,
688 max17042_thread_handler, 744 max17042_thread_handler,
@@ -693,13 +749,14 @@ static int __devinit max17042_probe(struct i2c_client *client,
693 reg |= CONFIG_ALRT_BIT_ENBL; 749 reg |= CONFIG_ALRT_BIT_ENBL;
694 max17042_write_reg(client, MAX17042_CONFIG, reg); 750 max17042_write_reg(client, MAX17042_CONFIG, reg);
695 max17042_set_soc_threshold(chip, 1); 751 max17042_set_soc_threshold(chip, 1);
696 } else 752 } else {
753 client->irq = 0;
697 dev_err(&client->dev, "%s(): cannot get IRQ\n", 754 dev_err(&client->dev, "%s(): cannot get IRQ\n",
698 __func__); 755 __func__);
756 }
699 } 757 }
700 758
701 reg = max17042_read_reg(chip->client, MAX17042_STATUS); 759 reg = max17042_read_reg(chip->client, MAX17042_STATUS);
702
703 if (reg & STATUS_POR_BIT) { 760 if (reg & STATUS_POR_BIT) {
704 INIT_WORK(&chip->work, max17042_init_worker); 761 INIT_WORK(&chip->work, max17042_init_worker);
705 schedule_work(&chip->work); 762 schedule_work(&chip->work);
@@ -707,23 +764,65 @@ static int __devinit max17042_probe(struct i2c_client *client,
707 chip->init_complete = 1; 764 chip->init_complete = 1;
708 } 765 }
709 766
710 ret = power_supply_register(&client->dev, &chip->battery); 767 return 0;
711 if (ret)
712 dev_err(&client->dev, "failed: power supply register\n");
713 return ret;
714} 768}
715 769
716static int __devexit max17042_remove(struct i2c_client *client) 770static int __devexit max17042_remove(struct i2c_client *client)
717{ 771{
718 struct max17042_chip *chip = i2c_get_clientdata(client); 772 struct max17042_chip *chip = i2c_get_clientdata(client);
719 773
774 if (client->irq)
775 free_irq(client->irq, chip);
720 power_supply_unregister(&chip->battery); 776 power_supply_unregister(&chip->battery);
721 return 0; 777 return 0;
722} 778}
723 779
780#ifdef CONFIG_PM
781static int max17042_suspend(struct device *dev)
782{
783 struct max17042_chip *chip = dev_get_drvdata(dev);
784
785 /*
786 * disable the irq and enable irq_wake
787 * capability to the interrupt line.
788 */
789 if (chip->client->irq) {
790 disable_irq(chip->client->irq);
791 enable_irq_wake(chip->client->irq);
792 }
793
794 return 0;
795}
796
797static int max17042_resume(struct device *dev)
798{
799 struct max17042_chip *chip = dev_get_drvdata(dev);
800
801 if (chip->client->irq) {
802 disable_irq_wake(chip->client->irq);
803 enable_irq(chip->client->irq);
804 /* re-program the SOC thresholds to 1% change */
805 max17042_set_soc_threshold(chip, 1);
806 }
807
808 return 0;
809}
810
811static const struct dev_pm_ops max17042_pm_ops = {
812 .suspend = max17042_suspend,
813 .resume = max17042_resume,
814};
815
816#define MAX17042_PM_OPS (&max17042_pm_ops)
817#else
818#define MAX17042_PM_OPS NULL
819#endif
820
724#ifdef CONFIG_OF 821#ifdef CONFIG_OF
725static const struct of_device_id max17042_dt_match[] = { 822static const struct of_device_id max17042_dt_match[] = {
726 { .compatible = "maxim,max17042" }, 823 { .compatible = "maxim,max17042" },
824 { .compatible = "maxim,max17047" },
825 { .compatible = "maxim,max17050" },
727 { }, 826 { },
728}; 827};
729MODULE_DEVICE_TABLE(of, max17042_dt_match); 828MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -731,6 +830,8 @@ MODULE_DEVICE_TABLE(of, max17042_dt_match);
731 830
732static const struct i2c_device_id max17042_id[] = { 831static const struct i2c_device_id max17042_id[] = {
733 { "max17042", 0 }, 832 { "max17042", 0 },
833 { "max17047", 1 },
834 { "max17050", 2 },
734 { } 835 { }
735}; 836};
736MODULE_DEVICE_TABLE(i2c, max17042_id); 837MODULE_DEVICE_TABLE(i2c, max17042_id);
@@ -739,6 +840,7 @@ static struct i2c_driver max17042_i2c_driver = {
739 .driver = { 840 .driver = {
740 .name = "max17042", 841 .name = "max17042",
741 .of_match_table = of_match_ptr(max17042_dt_match), 842 .of_match_table = of_match_ptr(max17042_dt_match),
843 .pm = MAX17042_PM_OPS,
742 }, 844 },
743 .probe = max17042_probe, 845 .probe = max17042_probe,
744 .remove = __devexit_p(max17042_remove), 846 .remove = __devexit_p(max17042_remove),
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 4368e7d61316..4150747f9186 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -146,6 +146,7 @@ static struct device_attribute power_supply_attrs[] = {
146 POWER_SUPPLY_ATTR(voltage_min_design), 146 POWER_SUPPLY_ATTR(voltage_min_design),
147 POWER_SUPPLY_ATTR(voltage_now), 147 POWER_SUPPLY_ATTR(voltage_now),
148 POWER_SUPPLY_ATTR(voltage_avg), 148 POWER_SUPPLY_ATTR(voltage_avg),
149 POWER_SUPPLY_ATTR(voltage_ocv),
149 POWER_SUPPLY_ATTR(current_max), 150 POWER_SUPPLY_ATTR(current_max),
150 POWER_SUPPLY_ATTR(current_now), 151 POWER_SUPPLY_ATTR(current_now),
151 POWER_SUPPLY_ATTR(current_avg), 152 POWER_SUPPLY_ATTR(current_avg),
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 06b659d91790..a5b6849d4123 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -89,7 +89,7 @@ static const struct chip_data {
89 [REG_CURRENT] = 89 [REG_CURRENT] =
90 SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767), 90 SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
91 [REG_CAPACITY] = 91 [REG_CAPACITY] =
92 SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100), 92 SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
93 [REG_REMAINING_CAPACITY] = 93 [REG_REMAINING_CAPACITY] =
94 SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535), 94 SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
95 [REG_REMAINING_CAPACITY_CHARGE] = 95 [REG_REMAINING_CAPACITY_CHARGE] =
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
index ce1694d1a365..f8eedd8a676f 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/smb347-charger.c
@@ -11,7 +11,7 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/debugfs.h> 14#include <linux/err.h>
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h> 17#include <linux/module.h>
@@ -21,7 +21,7 @@
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/power_supply.h> 22#include <linux/power_supply.h>
23#include <linux/power/smb347-charger.h> 23#include <linux/power/smb347-charger.h>
24#include <linux/seq_file.h> 24#include <linux/regmap.h>
25 25
26/* 26/*
27 * Configuration registers. These are mirrored to volatile RAM and can be 27 * Configuration registers. These are mirrored to volatile RAM and can be
@@ -39,6 +39,7 @@
39#define CFG_CURRENT_LIMIT_DC_SHIFT 4 39#define CFG_CURRENT_LIMIT_DC_SHIFT 4
40#define CFG_CURRENT_LIMIT_USB_MASK 0x0f 40#define CFG_CURRENT_LIMIT_USB_MASK 0x0f
41#define CFG_FLOAT_VOLTAGE 0x03 41#define CFG_FLOAT_VOLTAGE 0x03
42#define CFG_FLOAT_VOLTAGE_FLOAT_MASK 0x3f
42#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0 43#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0
43#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6 44#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6
44#define CFG_STAT 0x05 45#define CFG_STAT 0x05
@@ -113,29 +114,31 @@
113#define STAT_C_CHARGER_ERROR BIT(6) 114#define STAT_C_CHARGER_ERROR BIT(6)
114#define STAT_E 0x3f 115#define STAT_E 0x3f
115 116
117#define SMB347_MAX_REGISTER 0x3f
118
116/** 119/**
117 * struct smb347_charger - smb347 charger instance 120 * struct smb347_charger - smb347 charger instance
118 * @lock: protects concurrent access to online variables 121 * @lock: protects concurrent access to online variables
119 * @client: pointer to i2c client 122 * @dev: pointer to device
123 * @regmap: pointer to driver regmap
120 * @mains: power_supply instance for AC/DC power 124 * @mains: power_supply instance for AC/DC power
121 * @usb: power_supply instance for USB power 125 * @usb: power_supply instance for USB power
122 * @battery: power_supply instance for battery 126 * @battery: power_supply instance for battery
123 * @mains_online: is AC/DC input connected 127 * @mains_online: is AC/DC input connected
124 * @usb_online: is USB input connected 128 * @usb_online: is USB input connected
125 * @charging_enabled: is charging enabled 129 * @charging_enabled: is charging enabled
126 * @dentry: for debugfs
127 * @pdata: pointer to platform data 130 * @pdata: pointer to platform data
128 */ 131 */
129struct smb347_charger { 132struct smb347_charger {
130 struct mutex lock; 133 struct mutex lock;
131 struct i2c_client *client; 134 struct device *dev;
135 struct regmap *regmap;
132 struct power_supply mains; 136 struct power_supply mains;
133 struct power_supply usb; 137 struct power_supply usb;
134 struct power_supply battery; 138 struct power_supply battery;
135 bool mains_online; 139 bool mains_online;
136 bool usb_online; 140 bool usb_online;
137 bool charging_enabled; 141 bool charging_enabled;
138 struct dentry *dentry;
139 const struct smb347_charger_platform_data *pdata; 142 const struct smb347_charger_platform_data *pdata;
140}; 143};
141 144
@@ -193,14 +196,6 @@ static const unsigned int ccc_tbl[] = {
193 1200000, 196 1200000,
194}; 197};
195 198
196/* Convert register value to current using lookup table */
197static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
198{
199 if (val >= size)
200 return -EINVAL;
201 return tbl[val];
202}
203
204/* Convert current to register value using lookup table */ 199/* Convert current to register value using lookup table */
205static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val) 200static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
206{ 201{
@@ -212,43 +207,22 @@ static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
212 return i > 0 ? i - 1 : -EINVAL; 207 return i > 0 ? i - 1 : -EINVAL;
213} 208}
214 209
215static int smb347_read(struct smb347_charger *smb, u8 reg)
216{
217 int ret;
218
219 ret = i2c_smbus_read_byte_data(smb->client, reg);
220 if (ret < 0)
221 dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
222 reg, ret);
223 return ret;
224}
225
226static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
227{
228 int ret;
229
230 ret = i2c_smbus_write_byte_data(smb->client, reg, val);
231 if (ret < 0)
232 dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
233 reg, ret);
234 return ret;
235}
236
237/** 210/**
238 * smb347_update_status - updates the charging status 211 * smb347_update_ps_status - refreshes the power source status
239 * @smb: pointer to smb347 charger instance 212 * @smb: pointer to smb347 charger instance
240 * 213 *
241 * Function checks status of the charging and updates internal state 214 * Function checks whether any power source is connected to the charger and
242 * accordingly. Returns %0 if there is no change in status, %1 if the 215 * updates internal state accordingly. If there is a change to previous state
243 * status has changed and negative errno in case of failure. 216 * function returns %1, otherwise %0 and negative errno in case of errror.
244 */ 217 */
245static int smb347_update_status(struct smb347_charger *smb) 218static int smb347_update_ps_status(struct smb347_charger *smb)
246{ 219{
247 bool usb = false; 220 bool usb = false;
248 bool dc = false; 221 bool dc = false;
222 unsigned int val;
249 int ret; 223 int ret;
250 224
251 ret = smb347_read(smb, IRQSTAT_E); 225 ret = regmap_read(smb->regmap, IRQSTAT_E, &val);
252 if (ret < 0) 226 if (ret < 0)
253 return ret; 227 return ret;
254 228
@@ -257,9 +231,9 @@ static int smb347_update_status(struct smb347_charger *smb)
257 * platform data _and_ whether corresponding undervoltage is set. 231 * platform data _and_ whether corresponding undervoltage is set.
258 */ 232 */
259 if (smb->pdata->use_mains) 233 if (smb->pdata->use_mains)
260 dc = !(ret & IRQSTAT_E_DCIN_UV_STAT); 234 dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
261 if (smb->pdata->use_usb) 235 if (smb->pdata->use_usb)
262 usb = !(ret & IRQSTAT_E_USBIN_UV_STAT); 236 usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
263 237
264 mutex_lock(&smb->lock); 238 mutex_lock(&smb->lock);
265 ret = smb->mains_online != dc || smb->usb_online != usb; 239 ret = smb->mains_online != dc || smb->usb_online != usb;
@@ -271,15 +245,15 @@ static int smb347_update_status(struct smb347_charger *smb)
271} 245}
272 246
273/* 247/*
274 * smb347_is_online - returns whether input power source is connected 248 * smb347_is_ps_online - returns whether input power source is connected
275 * @smb: pointer to smb347 charger instance 249 * @smb: pointer to smb347 charger instance
276 * 250 *
277 * Returns %true if input power source is connected. Note that this is 251 * Returns %true if input power source is connected. Note that this is
278 * dependent on what platform has configured for usable power sources. For 252 * dependent on what platform has configured for usable power sources. For
279 * example if USB is disabled, this will return %false even if the USB 253 * example if USB is disabled, this will return %false even if the USB cable
280 * cable is connected. 254 * is connected.
281 */ 255 */
282static bool smb347_is_online(struct smb347_charger *smb) 256static bool smb347_is_ps_online(struct smb347_charger *smb)
283{ 257{
284 bool ret; 258 bool ret;
285 259
@@ -299,16 +273,17 @@ static bool smb347_is_online(struct smb347_charger *smb)
299 */ 273 */
300static int smb347_charging_status(struct smb347_charger *smb) 274static int smb347_charging_status(struct smb347_charger *smb)
301{ 275{
276 unsigned int val;
302 int ret; 277 int ret;
303 278
304 if (!smb347_is_online(smb)) 279 if (!smb347_is_ps_online(smb))
305 return 0; 280 return 0;
306 281
307 ret = smb347_read(smb, STAT_C); 282 ret = regmap_read(smb->regmap, STAT_C, &val);
308 if (ret < 0) 283 if (ret < 0)
309 return 0; 284 return 0;
310 285
311 return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT; 286 return (val & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
312} 287}
313 288
314static int smb347_charging_set(struct smb347_charger *smb, bool enable) 289static int smb347_charging_set(struct smb347_charger *smb, bool enable)
@@ -316,27 +291,17 @@ static int smb347_charging_set(struct smb347_charger *smb, bool enable)
316 int ret = 0; 291 int ret = 0;
317 292
318 if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) { 293 if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
319 dev_dbg(&smb->client->dev, 294 dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
320 "charging enable/disable in SW disabled\n");
321 return 0; 295 return 0;
322 } 296 }
323 297
324 mutex_lock(&smb->lock); 298 mutex_lock(&smb->lock);
325 if (smb->charging_enabled != enable) { 299 if (smb->charging_enabled != enable) {
326 ret = smb347_read(smb, CMD_A); 300 ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
327 if (ret < 0) 301 enable ? CMD_A_CHG_ENABLED : 0);
328 goto out; 302 if (!ret)
329 303 smb->charging_enabled = enable;
330 smb->charging_enabled = enable;
331
332 if (enable)
333 ret |= CMD_A_CHG_ENABLED;
334 else
335 ret &= ~CMD_A_CHG_ENABLED;
336
337 ret = smb347_write(smb, CMD_A, ret);
338 } 304 }
339out:
340 mutex_unlock(&smb->lock); 305 mutex_unlock(&smb->lock);
341 return ret; 306 return ret;
342} 307}
@@ -351,7 +316,7 @@ static inline int smb347_charging_disable(struct smb347_charger *smb)
351 return smb347_charging_set(smb, false); 316 return smb347_charging_set(smb, false);
352} 317}
353 318
354static int smb347_update_online(struct smb347_charger *smb) 319static int smb347_start_stop_charging(struct smb347_charger *smb)
355{ 320{
356 int ret; 321 int ret;
357 322
@@ -360,16 +325,14 @@ static int smb347_update_online(struct smb347_charger *smb)
360 * disable or enable the charging. We do it manually because it 325 * disable or enable the charging. We do it manually because it
361 * depends on how the platform has configured the valid inputs. 326 * depends on how the platform has configured the valid inputs.
362 */ 327 */
363 if (smb347_is_online(smb)) { 328 if (smb347_is_ps_online(smb)) {
364 ret = smb347_charging_enable(smb); 329 ret = smb347_charging_enable(smb);
365 if (ret < 0) 330 if (ret < 0)
366 dev_err(&smb->client->dev, 331 dev_err(smb->dev, "failed to enable charging\n");
367 "failed to enable charging\n");
368 } else { 332 } else {
369 ret = smb347_charging_disable(smb); 333 ret = smb347_charging_disable(smb);
370 if (ret < 0) 334 if (ret < 0)
371 dev_err(&smb->client->dev, 335 dev_err(smb->dev, "failed to disable charging\n");
372 "failed to disable charging\n");
373 } 336 }
374 337
375 return ret; 338 return ret;
@@ -377,112 +340,120 @@ static int smb347_update_online(struct smb347_charger *smb)
377 340
378static int smb347_set_charge_current(struct smb347_charger *smb) 341static int smb347_set_charge_current(struct smb347_charger *smb)
379{ 342{
380 int ret, val; 343 int ret;
381
382 ret = smb347_read(smb, CFG_CHARGE_CURRENT);
383 if (ret < 0)
384 return ret;
385 344
386 if (smb->pdata->max_charge_current) { 345 if (smb->pdata->max_charge_current) {
387 val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl), 346 ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
388 smb->pdata->max_charge_current); 347 smb->pdata->max_charge_current);
389 if (val < 0) 348 if (ret < 0)
390 return val; 349 return ret;
391 350
392 ret &= ~CFG_CHARGE_CURRENT_FCC_MASK; 351 ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
393 ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT; 352 CFG_CHARGE_CURRENT_FCC_MASK,
353 ret << CFG_CHARGE_CURRENT_FCC_SHIFT);
354 if (ret < 0)
355 return ret;
394 } 356 }
395 357
396 if (smb->pdata->pre_charge_current) { 358 if (smb->pdata->pre_charge_current) {
397 val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl), 359 ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
398 smb->pdata->pre_charge_current); 360 smb->pdata->pre_charge_current);
399 if (val < 0) 361 if (ret < 0)
400 return val; 362 return ret;
401 363
402 ret &= ~CFG_CHARGE_CURRENT_PCC_MASK; 364 ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
403 ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT; 365 CFG_CHARGE_CURRENT_PCC_MASK,
366 ret << CFG_CHARGE_CURRENT_PCC_SHIFT);
367 if (ret < 0)
368 return ret;
404 } 369 }
405 370
406 if (smb->pdata->termination_current) { 371 if (smb->pdata->termination_current) {
407 val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl), 372 ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
408 smb->pdata->termination_current); 373 smb->pdata->termination_current);
409 if (val < 0) 374 if (ret < 0)
410 return val; 375 return ret;
411 376
412 ret &= ~CFG_CHARGE_CURRENT_TC_MASK; 377 ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
413 ret |= val; 378 CFG_CHARGE_CURRENT_TC_MASK, ret);
379 if (ret < 0)
380 return ret;
414 } 381 }
415 382
416 return smb347_write(smb, CFG_CHARGE_CURRENT, ret); 383 return 0;
417} 384}
418 385
419static int smb347_set_current_limits(struct smb347_charger *smb) 386static int smb347_set_current_limits(struct smb347_charger *smb)
420{ 387{
421 int ret, val; 388 int ret;
422
423 ret = smb347_read(smb, CFG_CURRENT_LIMIT);
424 if (ret < 0)
425 return ret;
426 389
427 if (smb->pdata->mains_current_limit) { 390 if (smb->pdata->mains_current_limit) {
428 val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl), 391 ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
429 smb->pdata->mains_current_limit); 392 smb->pdata->mains_current_limit);
430 if (val < 0) 393 if (ret < 0)
431 return val; 394 return ret;
432 395
433 ret &= ~CFG_CURRENT_LIMIT_DC_MASK; 396 ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
434 ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT; 397 CFG_CURRENT_LIMIT_DC_MASK,
398 ret << CFG_CURRENT_LIMIT_DC_SHIFT);
399 if (ret < 0)
400 return ret;
435 } 401 }
436 402
437 if (smb->pdata->usb_hc_current_limit) { 403 if (smb->pdata->usb_hc_current_limit) {
438 val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl), 404 ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
439 smb->pdata->usb_hc_current_limit); 405 smb->pdata->usb_hc_current_limit);
440 if (val < 0) 406 if (ret < 0)
441 return val; 407 return ret;
442 408
443 ret &= ~CFG_CURRENT_LIMIT_USB_MASK; 409 ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
444 ret |= val; 410 CFG_CURRENT_LIMIT_USB_MASK, ret);
411 if (ret < 0)
412 return ret;
445 } 413 }
446 414
447 return smb347_write(smb, CFG_CURRENT_LIMIT, ret); 415 return 0;
448} 416}
449 417
450static int smb347_set_voltage_limits(struct smb347_charger *smb) 418static int smb347_set_voltage_limits(struct smb347_charger *smb)
451{ 419{
452 int ret, val; 420 int ret;
453
454 ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
455 if (ret < 0)
456 return ret;
457 421
458 if (smb->pdata->pre_to_fast_voltage) { 422 if (smb->pdata->pre_to_fast_voltage) {
459 val = smb->pdata->pre_to_fast_voltage; 423 ret = smb->pdata->pre_to_fast_voltage;
460 424
461 /* uV */ 425 /* uV */
462 val = clamp_val(val, 2400000, 3000000) - 2400000; 426 ret = clamp_val(ret, 2400000, 3000000) - 2400000;
463 val /= 200000; 427 ret /= 200000;
464 428
465 ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK; 429 ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
466 ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT; 430 CFG_FLOAT_VOLTAGE_THRESHOLD_MASK,
431 ret << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT);
432 if (ret < 0)
433 return ret;
467 } 434 }
468 435
469 if (smb->pdata->max_charge_voltage) { 436 if (smb->pdata->max_charge_voltage) {
470 val = smb->pdata->max_charge_voltage; 437 ret = smb->pdata->max_charge_voltage;
471 438
472 /* uV */ 439 /* uV */
473 val = clamp_val(val, 3500000, 4500000) - 3500000; 440 ret = clamp_val(ret, 3500000, 4500000) - 3500000;
474 val /= 20000; 441 ret /= 20000;
475 442
476 ret |= val; 443 ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
444 CFG_FLOAT_VOLTAGE_FLOAT_MASK, ret);
445 if (ret < 0)
446 return ret;
477 } 447 }
478 448
479 return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret); 449 return 0;
480} 450}
481 451
482static int smb347_set_temp_limits(struct smb347_charger *smb) 452static int smb347_set_temp_limits(struct smb347_charger *smb)
483{ 453{
484 bool enable_therm_monitor = false; 454 bool enable_therm_monitor = false;
485 int ret, val; 455 int ret = 0;
456 int val;
486 457
487 if (smb->pdata->chip_temp_threshold) { 458 if (smb->pdata->chip_temp_threshold) {
488 val = smb->pdata->chip_temp_threshold; 459 val = smb->pdata->chip_temp_threshold;
@@ -491,22 +462,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
491 val = clamp_val(val, 100, 130) - 100; 462 val = clamp_val(val, 100, 130) - 100;
492 val /= 10; 463 val /= 10;
493 464
494 ret = smb347_read(smb, CFG_OTG); 465 ret = regmap_update_bits(smb->regmap, CFG_OTG,
495 if (ret < 0) 466 CFG_OTG_TEMP_THRESHOLD_MASK,
496 return ret; 467 val << CFG_OTG_TEMP_THRESHOLD_SHIFT);
497
498 ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
499 ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
500
501 ret = smb347_write(smb, CFG_OTG, ret);
502 if (ret < 0) 468 if (ret < 0)
503 return ret; 469 return ret;
504 } 470 }
505 471
506 ret = smb347_read(smb, CFG_TEMP_LIMIT);
507 if (ret < 0)
508 return ret;
509
510 if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) { 472 if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
511 val = smb->pdata->soft_cold_temp_limit; 473 val = smb->pdata->soft_cold_temp_limit;
512 474
@@ -515,8 +477,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
515 /* this goes from higher to lower so invert the value */ 477 /* this goes from higher to lower so invert the value */
516 val = ~val & 0x3; 478 val = ~val & 0x3;
517 479
518 ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK; 480 ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
519 ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT; 481 CFG_TEMP_LIMIT_SOFT_COLD_MASK,
482 val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT);
483 if (ret < 0)
484 return ret;
520 485
521 enable_therm_monitor = true; 486 enable_therm_monitor = true;
522 } 487 }
@@ -527,8 +492,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
527 val = clamp_val(val, 40, 55) - 40; 492 val = clamp_val(val, 40, 55) - 40;
528 val /= 5; 493 val /= 5;
529 494
530 ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK; 495 ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
531 ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT; 496 CFG_TEMP_LIMIT_SOFT_HOT_MASK,
497 val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT);
498 if (ret < 0)
499 return ret;
532 500
533 enable_therm_monitor = true; 501 enable_therm_monitor = true;
534 } 502 }
@@ -541,8 +509,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
541 /* this goes from higher to lower so invert the value */ 509 /* this goes from higher to lower so invert the value */
542 val = ~val & 0x3; 510 val = ~val & 0x3;
543 511
544 ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK; 512 ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
545 ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT; 513 CFG_TEMP_LIMIT_HARD_COLD_MASK,
514 val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT);
515 if (ret < 0)
516 return ret;
546 517
547 enable_therm_monitor = true; 518 enable_therm_monitor = true;
548 } 519 }
@@ -553,16 +524,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
553 val = clamp_val(val, 50, 65) - 50; 524 val = clamp_val(val, 50, 65) - 50;
554 val /= 5; 525 val /= 5;
555 526
556 ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK; 527 ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
557 ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT; 528 CFG_TEMP_LIMIT_HARD_HOT_MASK,
529 val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT);
530 if (ret < 0)
531 return ret;
558 532
559 enable_therm_monitor = true; 533 enable_therm_monitor = true;
560 } 534 }
561 535
562 ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
563 if (ret < 0)
564 return ret;
565
566 /* 536 /*
567 * If any of the temperature limits are set, we also enable the 537 * If any of the temperature limits are set, we also enable the
568 * thermistor monitoring. 538 * thermistor monitoring.
@@ -574,25 +544,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
574 * depending on the configuration. 544 * depending on the configuration.
575 */ 545 */
576 if (enable_therm_monitor) { 546 if (enable_therm_monitor) {
577 ret = smb347_read(smb, CFG_THERM); 547 ret = regmap_update_bits(smb->regmap, CFG_THERM,
578 if (ret < 0) 548 CFG_THERM_MONITOR_DISABLED, 0);
579 return ret;
580
581 ret &= ~CFG_THERM_MONITOR_DISABLED;
582
583 ret = smb347_write(smb, CFG_THERM, ret);
584 if (ret < 0) 549 if (ret < 0)
585 return ret; 550 return ret;
586 } 551 }
587 552
588 if (smb->pdata->suspend_on_hard_temp_limit) { 553 if (smb->pdata->suspend_on_hard_temp_limit) {
589 ret = smb347_read(smb, CFG_SYSOK); 554 ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
590 if (ret < 0) 555 CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
591 return ret;
592
593 ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
594
595 ret = smb347_write(smb, CFG_SYSOK, ret);
596 if (ret < 0) 556 if (ret < 0)
597 return ret; 557 return ret;
598 } 558 }
@@ -601,17 +561,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
601 SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) { 561 SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
602 val = smb->pdata->soft_temp_limit_compensation & 0x3; 562 val = smb->pdata->soft_temp_limit_compensation & 0x3;
603 563
604 ret = smb347_read(smb, CFG_THERM); 564 ret = regmap_update_bits(smb->regmap, CFG_THERM,
565 CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
566 val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT);
605 if (ret < 0) 567 if (ret < 0)
606 return ret; 568 return ret;
607 569
608 ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK; 570 ret = regmap_update_bits(smb->regmap, CFG_THERM,
609 ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT; 571 CFG_THERM_SOFT_COLD_COMPENSATION_MASK,
610 572 val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT);
611 ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
612 ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
613
614 ret = smb347_write(smb, CFG_THERM, ret);
615 if (ret < 0) 573 if (ret < 0)
616 return ret; 574 return ret;
617 } 575 }
@@ -622,14 +580,9 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
622 if (val < 0) 580 if (val < 0)
623 return val; 581 return val;
624 582
625 ret = smb347_read(smb, CFG_OTG); 583 ret = regmap_update_bits(smb->regmap, CFG_OTG,
626 if (ret < 0) 584 CFG_OTG_CC_COMPENSATION_MASK,
627 return ret; 585 (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT);
628
629 ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
630 ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
631
632 ret = smb347_write(smb, CFG_OTG, ret);
633 if (ret < 0) 586 if (ret < 0)
634 return ret; 587 return ret;
635 } 588 }
@@ -648,22 +601,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
648 */ 601 */
649static int smb347_set_writable(struct smb347_charger *smb, bool writable) 602static int smb347_set_writable(struct smb347_charger *smb, bool writable)
650{ 603{
651 int ret; 604 return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
652 605 writable ? CMD_A_ALLOW_WRITE : 0);
653 ret = smb347_read(smb, CMD_A);
654 if (ret < 0)
655 return ret;
656
657 if (writable)
658 ret |= CMD_A_ALLOW_WRITE;
659 else
660 ret &= ~CMD_A_ALLOW_WRITE;
661
662 return smb347_write(smb, CMD_A, ret);
663} 606}
664 607
665static int smb347_hw_init(struct smb347_charger *smb) 608static int smb347_hw_init(struct smb347_charger *smb)
666{ 609{
610 unsigned int val;
667 int ret; 611 int ret;
668 612
669 ret = smb347_set_writable(smb, true); 613 ret = smb347_set_writable(smb, true);
@@ -692,34 +636,19 @@ static int smb347_hw_init(struct smb347_charger *smb)
692 636
693 /* If USB charging is disabled we put the USB in suspend mode */ 637 /* If USB charging is disabled we put the USB in suspend mode */
694 if (!smb->pdata->use_usb) { 638 if (!smb->pdata->use_usb) {
695 ret = smb347_read(smb, CMD_A); 639 ret = regmap_update_bits(smb->regmap, CMD_A,
696 if (ret < 0) 640 CMD_A_SUSPEND_ENABLED,
697 goto fail; 641 CMD_A_SUSPEND_ENABLED);
698
699 ret |= CMD_A_SUSPEND_ENABLED;
700
701 ret = smb347_write(smb, CMD_A, ret);
702 if (ret < 0) 642 if (ret < 0)
703 goto fail; 643 goto fail;
704 } 644 }
705 645
706 ret = smb347_read(smb, CFG_OTHER);
707 if (ret < 0)
708 goto fail;
709
710 /* 646 /*
711 * If configured by platform data, we enable hardware Auto-OTG 647 * If configured by platform data, we enable hardware Auto-OTG
712 * support for driving VBUS. Otherwise we disable it. 648 * support for driving VBUS. Otherwise we disable it.
713 */ 649 */
714 ret &= ~CFG_OTHER_RID_MASK; 650 ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
715 if (smb->pdata->use_usb_otg) 651 smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
716 ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
717
718 ret = smb347_write(smb, CFG_OTHER, ret);
719 if (ret < 0)
720 goto fail;
721
722 ret = smb347_read(smb, CFG_PIN);
723 if (ret < 0) 652 if (ret < 0)
724 goto fail; 653 goto fail;
725 654
@@ -728,32 +657,33 @@ static int smb347_hw_init(struct smb347_charger *smb)
728 * command register unless pin control is specified in the platform 657 * command register unless pin control is specified in the platform
729 * data. 658 * data.
730 */ 659 */
731 ret &= ~CFG_PIN_EN_CTRL_MASK;
732
733 switch (smb->pdata->enable_control) { 660 switch (smb->pdata->enable_control) {
734 case SMB347_CHG_ENABLE_SW:
735 /* Do nothing, 0 means i2c control */
736 break;
737 case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW: 661 case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
738 ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW; 662 val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
739 break; 663 break;
740 case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH: 664 case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
741 ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH; 665 val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
666 break;
667 default:
668 val = 0;
742 break; 669 break;
743 } 670 }
744 671
745 /* Disable Automatic Power Source Detection (APSD) interrupt. */ 672 ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL_MASK,
746 ret &= ~CFG_PIN_EN_APSD_IRQ; 673 val);
674 if (ret < 0)
675 goto fail;
747 676
748 ret = smb347_write(smb, CFG_PIN, ret); 677 /* Disable Automatic Power Source Detection (APSD) interrupt. */
678 ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_APSD_IRQ, 0);
749 if (ret < 0) 679 if (ret < 0)
750 goto fail; 680 goto fail;
751 681
752 ret = smb347_update_status(smb); 682 ret = smb347_update_ps_status(smb);
753 if (ret < 0) 683 if (ret < 0)
754 goto fail; 684 goto fail;
755 685
756 ret = smb347_update_online(smb); 686 ret = smb347_start_stop_charging(smb);
757 687
758fail: 688fail:
759 smb347_set_writable(smb, false); 689 smb347_set_writable(smb, false);
@@ -763,24 +693,25 @@ fail:
763static irqreturn_t smb347_interrupt(int irq, void *data) 693static irqreturn_t smb347_interrupt(int irq, void *data)
764{ 694{
765 struct smb347_charger *smb = data; 695 struct smb347_charger *smb = data;
766 int stat_c, irqstat_e, irqstat_c; 696 unsigned int stat_c, irqstat_e, irqstat_c;
767 irqreturn_t ret = IRQ_NONE; 697 bool handled = false;
698 int ret;
768 699
769 stat_c = smb347_read(smb, STAT_C); 700 ret = regmap_read(smb->regmap, STAT_C, &stat_c);
770 if (stat_c < 0) { 701 if (ret < 0) {
771 dev_warn(&smb->client->dev, "reading STAT_C failed\n"); 702 dev_warn(smb->dev, "reading STAT_C failed\n");
772 return IRQ_NONE; 703 return IRQ_NONE;
773 } 704 }
774 705
775 irqstat_c = smb347_read(smb, IRQSTAT_C); 706 ret = regmap_read(smb->regmap, IRQSTAT_C, &irqstat_c);
776 if (irqstat_c < 0) { 707 if (ret < 0) {
777 dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n"); 708 dev_warn(smb->dev, "reading IRQSTAT_C failed\n");
778 return IRQ_NONE; 709 return IRQ_NONE;
779 } 710 }
780 711
781 irqstat_e = smb347_read(smb, IRQSTAT_E); 712 ret = regmap_read(smb->regmap, IRQSTAT_E, &irqstat_e);
782 if (irqstat_e < 0) { 713 if (ret < 0) {
783 dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n"); 714 dev_warn(smb->dev, "reading IRQSTAT_E failed\n");
784 return IRQ_NONE; 715 return IRQ_NONE;
785 } 716 }
786 717
@@ -789,13 +720,11 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
789 * disable charging. 720 * disable charging.
790 */ 721 */
791 if (stat_c & STAT_C_CHARGER_ERROR) { 722 if (stat_c & STAT_C_CHARGER_ERROR) {
792 dev_err(&smb->client->dev, 723 dev_err(smb->dev, "error in charger, disabling charging\n");
793 "error in charger, disabling charging\n");
794 724
795 smb347_charging_disable(smb); 725 smb347_charging_disable(smb);
796 power_supply_changed(&smb->battery); 726 power_supply_changed(&smb->battery);
797 727 handled = true;
798 ret = IRQ_HANDLED;
799 } 728 }
800 729
801 /* 730 /*
@@ -806,7 +735,7 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
806 if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) { 735 if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
807 if (irqstat_c & IRQSTAT_C_TERMINATION_STAT) 736 if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
808 power_supply_changed(&smb->battery); 737 power_supply_changed(&smb->battery);
809 ret = IRQ_HANDLED; 738 handled = true;
810 } 739 }
811 740
812 /* 741 /*
@@ -814,15 +743,17 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
814 * was connected or disconnected. 743 * was connected or disconnected.
815 */ 744 */
816 if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) { 745 if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
817 if (smb347_update_status(smb) > 0) { 746 if (smb347_update_ps_status(smb) > 0) {
818 smb347_update_online(smb); 747 smb347_start_stop_charging(smb);
819 power_supply_changed(&smb->mains); 748 if (smb->pdata->use_mains)
820 power_supply_changed(&smb->usb); 749 power_supply_changed(&smb->mains);
750 if (smb->pdata->use_usb)
751 power_supply_changed(&smb->usb);
821 } 752 }
822 ret = IRQ_HANDLED; 753 handled = true;
823 } 754 }
824 755
825 return ret; 756 return handled ? IRQ_HANDLED : IRQ_NONE;
826} 757}
827 758
828static int smb347_irq_set(struct smb347_charger *smb, bool enable) 759static int smb347_irq_set(struct smb347_charger *smb, bool enable)
@@ -839,41 +770,18 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
839 * - termination current reached 770 * - termination current reached
840 * - charger error 771 * - charger error
841 */ 772 */
842 if (enable) { 773 ret = regmap_update_bits(smb->regmap, CFG_FAULT_IRQ, 0xff,
843 ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV); 774 enable ? CFG_FAULT_IRQ_DCIN_UV : 0);
844 if (ret < 0) 775 if (ret < 0)
845 goto fail; 776 goto fail;
846
847 ret = smb347_write(smb, CFG_STATUS_IRQ,
848 CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
849 if (ret < 0)
850 goto fail;
851
852 ret = smb347_read(smb, CFG_PIN);
853 if (ret < 0)
854 goto fail;
855
856 ret |= CFG_PIN_EN_CHARGER_ERROR;
857
858 ret = smb347_write(smb, CFG_PIN, ret);
859 } else {
860 ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
861 if (ret < 0)
862 goto fail;
863
864 ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
865 if (ret < 0)
866 goto fail;
867
868 ret = smb347_read(smb, CFG_PIN);
869 if (ret < 0)
870 goto fail;
871
872 ret &= ~CFG_PIN_EN_CHARGER_ERROR;
873 777
874 ret = smb347_write(smb, CFG_PIN, ret); 778 ret = regmap_update_bits(smb->regmap, CFG_STATUS_IRQ, 0xff,
875 } 779 enable ? CFG_STATUS_IRQ_TERMINATION_OR_TAPER : 0);
780 if (ret < 0)
781 goto fail;
876 782
783 ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
784 enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
877fail: 785fail:
878 smb347_set_writable(smb, false); 786 smb347_set_writable(smb, false);
879 return ret; 787 return ret;
@@ -889,18 +797,18 @@ static inline int smb347_irq_disable(struct smb347_charger *smb)
889 return smb347_irq_set(smb, false); 797 return smb347_irq_set(smb, false);
890} 798}
891 799
892static int smb347_irq_init(struct smb347_charger *smb) 800static int smb347_irq_init(struct smb347_charger *smb,
801 struct i2c_client *client)
893{ 802{
894 const struct smb347_charger_platform_data *pdata = smb->pdata; 803 const struct smb347_charger_platform_data *pdata = smb->pdata;
895 int ret, irq = gpio_to_irq(pdata->irq_gpio); 804 int ret, irq = gpio_to_irq(pdata->irq_gpio);
896 805
897 ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name); 806 ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
898 if (ret < 0) 807 if (ret < 0)
899 goto fail; 808 goto fail;
900 809
901 ret = request_threaded_irq(irq, NULL, smb347_interrupt, 810 ret = request_threaded_irq(irq, NULL, smb347_interrupt,
902 IRQF_TRIGGER_FALLING, smb->client->name, 811 IRQF_TRIGGER_FALLING, client->name, smb);
903 smb);
904 if (ret < 0) 812 if (ret < 0)
905 goto fail_gpio; 813 goto fail_gpio;
906 814
@@ -912,23 +820,14 @@ static int smb347_irq_init(struct smb347_charger *smb)
912 * Configure the STAT output to be suitable for interrupts: disable 820 * Configure the STAT output to be suitable for interrupts: disable
913 * all other output (except interrupts) and make it active low. 821 * all other output (except interrupts) and make it active low.
914 */ 822 */
915 ret = smb347_read(smb, CFG_STAT); 823 ret = regmap_update_bits(smb->regmap, CFG_STAT,
916 if (ret < 0) 824 CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
917 goto fail_readonly; 825 CFG_STAT_DISABLED);
918
919 ret &= ~CFG_STAT_ACTIVE_HIGH;
920 ret |= CFG_STAT_DISABLED;
921
922 ret = smb347_write(smb, CFG_STAT, ret);
923 if (ret < 0)
924 goto fail_readonly;
925
926 ret = smb347_irq_enable(smb);
927 if (ret < 0) 826 if (ret < 0)
928 goto fail_readonly; 827 goto fail_readonly;
929 828
930 smb347_set_writable(smb, false); 829 smb347_set_writable(smb, false);
931 smb->client->irq = irq; 830 client->irq = irq;
932 return 0; 831 return 0;
933 832
934fail_readonly: 833fail_readonly:
@@ -938,7 +837,7 @@ fail_irq:
938fail_gpio: 837fail_gpio:
939 gpio_free(pdata->irq_gpio); 838 gpio_free(pdata->irq_gpio);
940fail: 839fail:
941 smb->client->irq = 0; 840 client->irq = 0;
942 return ret; 841 return ret;
943} 842}
944 843
@@ -987,13 +886,13 @@ static int smb347_battery_get_property(struct power_supply *psy,
987 const struct smb347_charger_platform_data *pdata = smb->pdata; 886 const struct smb347_charger_platform_data *pdata = smb->pdata;
988 int ret; 887 int ret;
989 888
990 ret = smb347_update_status(smb); 889 ret = smb347_update_ps_status(smb);
991 if (ret < 0) 890 if (ret < 0)
992 return ret; 891 return ret;
993 892
994 switch (prop) { 893 switch (prop) {
995 case POWER_SUPPLY_PROP_STATUS: 894 case POWER_SUPPLY_PROP_STATUS:
996 if (!smb347_is_online(smb)) { 895 if (!smb347_is_ps_online(smb)) {
997 val->intval = POWER_SUPPLY_STATUS_DISCHARGING; 896 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
998 break; 897 break;
999 } 898 }
@@ -1004,7 +903,7 @@ static int smb347_battery_get_property(struct power_supply *psy,
1004 break; 903 break;
1005 904
1006 case POWER_SUPPLY_PROP_CHARGE_TYPE: 905 case POWER_SUPPLY_PROP_CHARGE_TYPE:
1007 if (!smb347_is_online(smb)) 906 if (!smb347_is_ps_online(smb))
1008 return -ENODATA; 907 return -ENODATA;
1009 908
1010 /* 909 /*
@@ -1036,44 +935,6 @@ static int smb347_battery_get_property(struct power_supply *psy,
1036 val->intval = pdata->battery_info.voltage_max_design; 935 val->intval = pdata->battery_info.voltage_max_design;
1037 break; 936 break;
1038 937
1039 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
1040 if (!smb347_is_online(smb))
1041 return -ENODATA;
1042 ret = smb347_read(smb, STAT_A);
1043 if (ret < 0)
1044 return ret;
1045
1046 ret &= STAT_A_FLOAT_VOLTAGE_MASK;
1047 if (ret > 0x3d)
1048 ret = 0x3d;
1049
1050 val->intval = 3500000 + ret * 20000;
1051 break;
1052
1053 case POWER_SUPPLY_PROP_CURRENT_NOW:
1054 if (!smb347_is_online(smb))
1055 return -ENODATA;
1056
1057 ret = smb347_read(smb, STAT_B);
1058 if (ret < 0)
1059 return ret;
1060
1061 /*
1062 * The current value is composition of FCC and PCC values
1063 * and we can detect which table to use from bit 5.
1064 */
1065 if (ret & 0x20) {
1066 val->intval = hw_to_current(fcc_tbl,
1067 ARRAY_SIZE(fcc_tbl),
1068 ret & 7);
1069 } else {
1070 ret >>= 3;
1071 val->intval = hw_to_current(pcc_tbl,
1072 ARRAY_SIZE(pcc_tbl),
1073 ret & 7);
1074 }
1075 break;
1076
1077 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: 938 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
1078 val->intval = pdata->battery_info.charge_full_design; 939 val->intval = pdata->battery_info.charge_full_design;
1079 break; 940 break;
@@ -1095,64 +956,58 @@ static enum power_supply_property smb347_battery_properties[] = {
1095 POWER_SUPPLY_PROP_TECHNOLOGY, 956 POWER_SUPPLY_PROP_TECHNOLOGY,
1096 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 957 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
1097 POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 958 POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
1098 POWER_SUPPLY_PROP_VOLTAGE_NOW,
1099 POWER_SUPPLY_PROP_CURRENT_NOW,
1100 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 959 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
1101 POWER_SUPPLY_PROP_MODEL_NAME, 960 POWER_SUPPLY_PROP_MODEL_NAME,
1102}; 961};
1103 962
1104static int smb347_debugfs_show(struct seq_file *s, void *data) 963static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
1105{ 964{
1106 struct smb347_charger *smb = s->private; 965 switch (reg) {
1107 int ret; 966 case IRQSTAT_A:
1108 u8 reg; 967 case IRQSTAT_C:
1109 968 case IRQSTAT_E:
1110 seq_printf(s, "Control registers:\n"); 969 case IRQSTAT_F:
1111 seq_printf(s, "==================\n"); 970 case STAT_A:
1112 for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) { 971 case STAT_B:
1113 ret = smb347_read(smb, reg); 972 case STAT_C:
1114 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret); 973 case STAT_E:
1115 } 974 return true;
1116 seq_printf(s, "\n");
1117
1118 seq_printf(s, "Command registers:\n");
1119 seq_printf(s, "==================\n");
1120 ret = smb347_read(smb, CMD_A);
1121 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
1122 ret = smb347_read(smb, CMD_B);
1123 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
1124 ret = smb347_read(smb, CMD_C);
1125 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
1126 seq_printf(s, "\n");
1127
1128 seq_printf(s, "Interrupt status registers:\n");
1129 seq_printf(s, "===========================\n");
1130 for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
1131 ret = smb347_read(smb, reg);
1132 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
1133 }
1134 seq_printf(s, "\n");
1135
1136 seq_printf(s, "Status registers:\n");
1137 seq_printf(s, "=================\n");
1138 for (reg = STAT_A; reg <= STAT_E; reg++) {
1139 ret = smb347_read(smb, reg);
1140 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
1141 } 975 }
1142 976
1143 return 0; 977 return false;
1144} 978}
1145 979
1146static int smb347_debugfs_open(struct inode *inode, struct file *file) 980static bool smb347_readable_reg(struct device *dev, unsigned int reg)
1147{ 981{
1148 return single_open(file, smb347_debugfs_show, inode->i_private); 982 switch (reg) {
983 case CFG_CHARGE_CURRENT:
984 case CFG_CURRENT_LIMIT:
985 case CFG_FLOAT_VOLTAGE:
986 case CFG_STAT:
987 case CFG_PIN:
988 case CFG_THERM:
989 case CFG_SYSOK:
990 case CFG_OTHER:
991 case CFG_OTG:
992 case CFG_TEMP_LIMIT:
993 case CFG_FAULT_IRQ:
994 case CFG_STATUS_IRQ:
995 case CFG_ADDRESS:
996 case CMD_A:
997 case CMD_B:
998 case CMD_C:
999 return true;
1000 }
1001
1002 return smb347_volatile_reg(dev, reg);
1149} 1003}
1150 1004
1151static const struct file_operations smb347_debugfs_fops = { 1005static const struct regmap_config smb347_regmap = {
1152 .open = smb347_debugfs_open, 1006 .reg_bits = 8,
1153 .read = seq_read, 1007 .val_bits = 8,
1154 .llseek = seq_lseek, 1008 .max_register = SMB347_MAX_REGISTER,
1155 .release = single_release, 1009 .volatile_reg = smb347_volatile_reg,
1010 .readable_reg = smb347_readable_reg,
1156}; 1011};
1157 1012
1158static int smb347_probe(struct i2c_client *client, 1013static int smb347_probe(struct i2c_client *client,
@@ -1178,28 +1033,45 @@ static int smb347_probe(struct i2c_client *client,
1178 i2c_set_clientdata(client, smb); 1033 i2c_set_clientdata(client, smb);
1179 1034
1180 mutex_init(&smb->lock); 1035 mutex_init(&smb->lock);
1181 smb->client = client; 1036 smb->dev = &client->dev;
1182 smb->pdata = pdata; 1037 smb->pdata = pdata;
1183 1038
1039 smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
1040 if (IS_ERR(smb->regmap))
1041 return PTR_ERR(smb->regmap);
1042
1184 ret = smb347_hw_init(smb); 1043 ret = smb347_hw_init(smb);
1185 if (ret < 0) 1044 if (ret < 0)
1186 return ret; 1045 return ret;
1187 1046
1188 smb->mains.name = "smb347-mains"; 1047 if (smb->pdata->use_mains) {
1189 smb->mains.type = POWER_SUPPLY_TYPE_MAINS; 1048 smb->mains.name = "smb347-mains";
1190 smb->mains.get_property = smb347_mains_get_property; 1049 smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
1191 smb->mains.properties = smb347_mains_properties; 1050 smb->mains.get_property = smb347_mains_get_property;
1192 smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties); 1051 smb->mains.properties = smb347_mains_properties;
1193 smb->mains.supplied_to = battery; 1052 smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
1194 smb->mains.num_supplicants = ARRAY_SIZE(battery); 1053 smb->mains.supplied_to = battery;
1195 1054 smb->mains.num_supplicants = ARRAY_SIZE(battery);
1196 smb->usb.name = "smb347-usb"; 1055 ret = power_supply_register(dev, &smb->mains);
1197 smb->usb.type = POWER_SUPPLY_TYPE_USB; 1056 if (ret < 0)
1198 smb->usb.get_property = smb347_usb_get_property; 1057 return ret;
1199 smb->usb.properties = smb347_usb_properties; 1058 }
1200 smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties); 1059
1201 smb->usb.supplied_to = battery; 1060 if (smb->pdata->use_usb) {
1202 smb->usb.num_supplicants = ARRAY_SIZE(battery); 1061 smb->usb.name = "smb347-usb";
1062 smb->usb.type = POWER_SUPPLY_TYPE_USB;
1063 smb->usb.get_property = smb347_usb_get_property;
1064 smb->usb.properties = smb347_usb_properties;
1065 smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
1066 smb->usb.supplied_to = battery;
1067 smb->usb.num_supplicants = ARRAY_SIZE(battery);
1068 ret = power_supply_register(dev, &smb->usb);
1069 if (ret < 0) {
1070 if (smb->pdata->use_mains)
1071 power_supply_unregister(&smb->mains);
1072 return ret;
1073 }
1074 }
1203 1075
1204 smb->battery.name = "smb347-battery"; 1076 smb->battery.name = "smb347-battery";
1205 smb->battery.type = POWER_SUPPLY_TYPE_BATTERY; 1077 smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1207,20 +1079,13 @@ static int smb347_probe(struct i2c_client *client,
1207 smb->battery.properties = smb347_battery_properties; 1079 smb->battery.properties = smb347_battery_properties;
1208 smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties); 1080 smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
1209 1081
1210 ret = power_supply_register(dev, &smb->mains);
1211 if (ret < 0)
1212 return ret;
1213
1214 ret = power_supply_register(dev, &smb->usb);
1215 if (ret < 0) {
1216 power_supply_unregister(&smb->mains);
1217 return ret;
1218 }
1219 1082
1220 ret = power_supply_register(dev, &smb->battery); 1083 ret = power_supply_register(dev, &smb->battery);
1221 if (ret < 0) { 1084 if (ret < 0) {
1222 power_supply_unregister(&smb->usb); 1085 if (smb->pdata->use_usb)
1223 power_supply_unregister(&smb->mains); 1086 power_supply_unregister(&smb->usb);
1087 if (smb->pdata->use_mains)
1088 power_supply_unregister(&smb->mains);
1224 return ret; 1089 return ret;
1225 } 1090 }
1226 1091
@@ -1229,15 +1094,15 @@ static int smb347_probe(struct i2c_client *client,
1229 * interrupt support here. 1094 * interrupt support here.
1230 */ 1095 */
1231 if (pdata->irq_gpio >= 0) { 1096 if (pdata->irq_gpio >= 0) {
1232 ret = smb347_irq_init(smb); 1097 ret = smb347_irq_init(smb, client);
1233 if (ret < 0) { 1098 if (ret < 0) {
1234 dev_warn(dev, "failed to initialize IRQ: %d\n", ret); 1099 dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
1235 dev_warn(dev, "disabling IRQ support\n"); 1100 dev_warn(dev, "disabling IRQ support\n");
1101 } else {
1102 smb347_irq_enable(smb);
1236 } 1103 }
1237 } 1104 }
1238 1105
1239 smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
1240 &smb347_debugfs_fops);
1241 return 0; 1106 return 0;
1242} 1107}
1243 1108
@@ -1245,9 +1110,6 @@ static int smb347_remove(struct i2c_client *client)
1245{ 1110{
1246 struct smb347_charger *smb = i2c_get_clientdata(client); 1111 struct smb347_charger *smb = i2c_get_clientdata(client);
1247 1112
1248 if (!IS_ERR_OR_NULL(smb->dentry))
1249 debugfs_remove(smb->dentry);
1250
1251 if (client->irq) { 1113 if (client->irq) {
1252 smb347_irq_disable(smb); 1114 smb347_irq_disable(smb);
1253 free_irq(client->irq, smb); 1115 free_irq(client->irq, smb);
@@ -1255,8 +1117,10 @@ static int smb347_remove(struct i2c_client *client)
1255 } 1117 }
1256 1118
1257 power_supply_unregister(&smb->battery); 1119 power_supply_unregister(&smb->battery);
1258 power_supply_unregister(&smb->usb); 1120 if (smb->pdata->use_usb)
1259 power_supply_unregister(&smb->mains); 1121 power_supply_unregister(&smb->usb);
1122 if (smb->pdata->use_mains)
1123 power_supply_unregister(&smb->mains);
1260 return 0; 1124 return 0;
1261} 1125}
1262 1126
@@ -1275,17 +1139,7 @@ static struct i2c_driver smb347_driver = {
1275 .id_table = smb347_id, 1139 .id_table = smb347_id,
1276}; 1140};
1277 1141
1278static int __init smb347_init(void) 1142module_i2c_driver(smb347_driver);
1279{
1280 return i2c_add_driver(&smb347_driver);
1281}
1282module_init(smb347_init);
1283
1284static void __exit smb347_exit(void)
1285{
1286 i2c_del_driver(&smb347_driver);
1287}
1288module_exit(smb347_exit);
1289 1143
1290MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>"); 1144MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
1291MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 1145MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index bc8719238793..6194d35ebb97 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -22,6 +22,20 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
22 ports for Input/Output direction to allow other traffic 22 ports for Input/Output direction to allow other traffic
23 than Maintenance transfers. 23 than Maintenance transfers.
24 24
25config RAPIDIO_DMA_ENGINE
26 bool "DMA Engine support for RapidIO"
27 depends on RAPIDIO
28 select DMADEVICES
29 select DMA_ENGINE
30 help
31 Say Y here if you want to use DMA Engine frameork for RapidIO data
32 transfers to/from target RIO devices. RapidIO uses NREAD and
33 NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
34 memory and memory on remote target device. You need a DMA controller
35 capable to perform data transfers to/from RapidIO.
36
37 If you are unsure about this, say Y here.
38
25config RAPIDIO_DEBUG 39config RAPIDIO_DEBUG
26 bool "RapidIO subsystem debug messages" 40 bool "RapidIO subsystem debug messages"
27 depends on RAPIDIO 41 depends on RAPIDIO
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 3b7b4e2dff7c..7b62860f34f8 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -3,3 +3,6 @@
3# 3#
4 4
5obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o 5obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o
6ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
7obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o
8endif
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 30d2072f480b..722246cf20ab 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
108 u16 destid, u8 hopcount, u32 offset, int len, 108 u16 destid, u8 hopcount, u32 offset, int len,
109 u32 *data, int do_wr) 109 u32 *data, int do_wr)
110{ 110{
111 void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
111 struct tsi721_dma_desc *bd_ptr; 112 struct tsi721_dma_desc *bd_ptr;
112 u32 rd_count, swr_ptr, ch_stat; 113 u32 rd_count, swr_ptr, ch_stat;
113 int i, err = 0; 114 int i, err = 0;
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
116 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) 117 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
117 return -EINVAL; 118 return -EINVAL;
118 119
119 bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base; 120 bd_ptr = priv->mdma.bd_base;
120 121
121 rd_count = ioread32( 122 rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
122 priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
123 123
124 /* Initialize DMA descriptor */ 124 /* Initialize DMA descriptor */
125 bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); 125 bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
134 mb(); 134 mb();
135 135
136 /* Start DMA operation */ 136 /* Start DMA operation */
137 iowrite32(rd_count + 2, 137 iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
138 priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT)); 138 ioread32(regs + TSI721_DMAC_DWRCNT);
139 ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
140 i = 0; 139 i = 0;
141 140
142 /* Wait until DMA transfer is finished */ 141 /* Wait until DMA transfer is finished */
143 while ((ch_stat = ioread32(priv->regs + 142 while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
144 TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) { 143 & TSI721_DMAC_STS_RUN) {
145 udelay(1); 144 udelay(1);
146 if (++i >= 5000000) { 145 if (++i >= 5000000) {
147 dev_dbg(&priv->pdev->dev, 146 dev_dbg(&priv->pdev->dev,
148 "%s : DMA[%d] read timeout ch_status=%x\n", 147 "%s : DMA[%d] read timeout ch_status=%x\n",
149 __func__, TSI721_DMACH_MAINT, ch_stat); 148 __func__, priv->mdma.ch_id, ch_stat);
150 if (!do_wr) 149 if (!do_wr)
151 *data = 0xffffffff; 150 *data = 0xffffffff;
152 err = -EIO; 151 err = -EIO;
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
162 __func__, ch_stat); 161 __func__, ch_stat);
163 dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n", 162 dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
164 do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset); 163 do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
165 iowrite32(TSI721_DMAC_INT_ALL, 164 iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
166 priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT)); 165 iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
167 iowrite32(TSI721_DMAC_CTL_INIT,
168 priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
169 udelay(10); 166 udelay(10);
170 iowrite32(0, priv->regs + 167 iowrite32(0, regs + TSI721_DMAC_DWRCNT);
171 TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
172 udelay(1); 168 udelay(1);
173 if (!do_wr) 169 if (!do_wr)
174 *data = 0xffffffff; 170 *data = 0xffffffff;
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
184 * NOTE: Skipping check and clear FIFO entries because we are waiting 180 * NOTE: Skipping check and clear FIFO entries because we are waiting
185 * for transfer to be completed. 181 * for transfer to be completed.
186 */ 182 */
187 swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT)); 183 swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
188 iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT)); 184 iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
189err_out: 185err_out:
190 186
191 return err; 187 return err;
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
541 tsi721_pw_handler(mport); 537 tsi721_pw_handler(mport);
542 } 538 }
543 539
540#ifdef CONFIG_RAPIDIO_DMA_ENGINE
541 if (dev_int & TSI721_DEV_INT_BDMA_CH) {
542 int ch;
543
544 if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
545 dev_dbg(&priv->pdev->dev,
546 "IRQ from DMA channel 0x%08x\n", dev_ch_int);
547
548 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
549 if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
550 continue;
551 tsi721_bdma_handler(&priv->bdma[ch]);
552 }
553 }
554 }
555#endif
544 return IRQ_HANDLED; 556 return IRQ_HANDLED;
545} 557}
546 558
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
553 priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); 565 priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
554 iowrite32(TSI721_SR_CHINT_IDBQRCV, 566 iowrite32(TSI721_SR_CHINT_IDBQRCV,
555 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); 567 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
556 iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
557 priv->regs + TSI721_DEV_CHAN_INTE);
558 568
559 /* Enable SRIO MAC interrupts */ 569 /* Enable SRIO MAC interrupts */
560 iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, 570 iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
561 priv->regs + TSI721_RIO_EM_DEV_INT_EN); 571 priv->regs + TSI721_RIO_EM_DEV_INT_EN);
562 572
573 /* Enable interrupts from channels in use */
574#ifdef CONFIG_RAPIDIO_DMA_ENGINE
575 intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
576 (TSI721_INT_BDMA_CHAN_M &
577 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
578#else
579 intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
580#endif
581 iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
582
563 if (priv->flags & TSI721_USING_MSIX) 583 if (priv->flags & TSI721_USING_MSIX)
564 intr = TSI721_DEV_INT_SRIO; 584 intr = TSI721_DEV_INT_SRIO;
565 else 585 else
566 intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | 586 intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
567 TSI721_DEV_INT_SMSG_CH; 587 TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
568 588
569 iowrite32(intr, priv->regs + TSI721_DEV_INTE); 589 iowrite32(intr, priv->regs + TSI721_DEV_INTE);
570 ioread32(priv->regs + TSI721_DEV_INTE); 590 ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
715 TSI721_MSIX_OMSG_INT(i); 735 TSI721_MSIX_OMSG_INT(i);
716 } 736 }
717 737
738#ifdef CONFIG_RAPIDIO_DMA_ENGINE
739 /*
740 * Initialize MSI-X entries for Block DMA Engine:
741 * this driver supports XXX DMA channels
742 * (one is reserved for SRIO maintenance transactions)
743 */
744 for (i = 0; i < TSI721_DMA_CHNUM; i++) {
745 entries[TSI721_VECT_DMA0_DONE + i].entry =
746 TSI721_MSIX_DMACH_DONE(i);
747 entries[TSI721_VECT_DMA0_INT + i].entry =
748 TSI721_MSIX_DMACH_INT(i);
749 }
750#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
751
718 err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries)); 752 err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
719 if (err) { 753 if (err) {
720 if (err > 0) 754 if (err > 0)
721 dev_info(&priv->pdev->dev, 755 dev_info(&priv->pdev->dev,
722 "Only %d MSI-X vectors available, " 756 "Only %d MSI-X vectors available, "
723 "not using MSI-X\n", err); 757 "not using MSI-X\n", err);
758 else
759 dev_err(&priv->pdev->dev,
760 "Failed to enable MSI-X (err=%d)\n", err);
724 return err; 761 return err;
725 } 762 }
726 763
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
760 i, pci_name(priv->pdev)); 797 i, pci_name(priv->pdev));
761 } 798 }
762 799
800#ifdef CONFIG_RAPIDIO_DMA_ENGINE
801 for (i = 0; i < TSI721_DMA_CHNUM; i++) {
802 priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
803 entries[TSI721_VECT_DMA0_DONE + i].vector;
804 snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
805 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
806 i, pci_name(priv->pdev));
807
808 priv->msix[TSI721_VECT_DMA0_INT + i].vector =
809 entries[TSI721_VECT_DMA0_INT + i].vector;
810 snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
811 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
812 i, pci_name(priv->pdev));
813 }
814#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
815
763 return 0; 816 return 0;
764} 817}
765#endif /* CONFIG_PCI_MSI */ 818#endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
888 priv->idb_base = NULL; 941 priv->idb_base = NULL;
889} 942}
890 943
891static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) 944/**
945 * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
946 * @priv: pointer to tsi721 private data
947 *
948 * Initialize BDMA channel allocated for RapidIO maintenance read/write
949 * request generation
950 * Returns %0 on success or %-ENOMEM on failure.
951 */
952static int tsi721_bdma_maint_init(struct tsi721_device *priv)
892{ 953{
893 struct tsi721_dma_desc *bd_ptr; 954 struct tsi721_dma_desc *bd_ptr;
894 u64 *sts_ptr; 955 u64 *sts_ptr;
895 dma_addr_t bd_phys, sts_phys; 956 dma_addr_t bd_phys, sts_phys;
896 int sts_size; 957 int sts_size;
897 int bd_num = priv->bdma[chnum].bd_num; 958 int bd_num = 2;
959 void __iomem *regs;
898 960
899 dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum); 961 dev_dbg(&priv->pdev->dev,
962 "Init Block DMA Engine for Maintenance requests, CH%d\n",
963 TSI721_DMACH_MAINT);
900 964
901 /* 965 /*
902 * Initialize DMA channel for maintenance requests 966 * Initialize DMA channel for maintenance requests
903 */ 967 */
904 968
969 priv->mdma.ch_id = TSI721_DMACH_MAINT;
970 regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
971
905 /* Allocate space for DMA descriptors */ 972 /* Allocate space for DMA descriptors */
906 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, 973 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
907 bd_num * sizeof(struct tsi721_dma_desc), 974 bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
909 if (!bd_ptr) 976 if (!bd_ptr)
910 return -ENOMEM; 977 return -ENOMEM;
911 978
912 priv->bdma[chnum].bd_phys = bd_phys; 979 priv->mdma.bd_num = bd_num;
913 priv->bdma[chnum].bd_base = bd_ptr; 980 priv->mdma.bd_phys = bd_phys;
981 priv->mdma.bd_base = bd_ptr;
914 982
915 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", 983 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
916 bd_ptr, (unsigned long long)bd_phys); 984 bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
927 dma_free_coherent(&priv->pdev->dev, 995 dma_free_coherent(&priv->pdev->dev,
928 bd_num * sizeof(struct tsi721_dma_desc), 996 bd_num * sizeof(struct tsi721_dma_desc),
929 bd_ptr, bd_phys); 997 bd_ptr, bd_phys);
930 priv->bdma[chnum].bd_base = NULL; 998 priv->mdma.bd_base = NULL;
931 return -ENOMEM; 999 return -ENOMEM;
932 } 1000 }
933 1001
934 priv->bdma[chnum].sts_phys = sts_phys; 1002 priv->mdma.sts_phys = sts_phys;
935 priv->bdma[chnum].sts_base = sts_ptr; 1003 priv->mdma.sts_base = sts_ptr;
936 priv->bdma[chnum].sts_size = sts_size; 1004 priv->mdma.sts_size = sts_size;
937 1005
938 dev_dbg(&priv->pdev->dev, 1006 dev_dbg(&priv->pdev->dev,
939 "desc status FIFO @ %p (phys = %llx) size=0x%x\n", 1007 "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
946 bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); 1014 bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
947 1015
948 /* Setup DMA descriptor pointers */ 1016 /* Setup DMA descriptor pointers */
949 iowrite32(((u64)bd_phys >> 32), 1017 iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
950 priv->regs + TSI721_DMAC_DPTRH(chnum));
951 iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), 1018 iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
952 priv->regs + TSI721_DMAC_DPTRL(chnum)); 1019 regs + TSI721_DMAC_DPTRL);
953 1020
954 /* Setup descriptor status FIFO */ 1021 /* Setup descriptor status FIFO */
955 iowrite32(((u64)sts_phys >> 32), 1022 iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
956 priv->regs + TSI721_DMAC_DSBH(chnum));
957 iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), 1023 iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
958 priv->regs + TSI721_DMAC_DSBL(chnum)); 1024 regs + TSI721_DMAC_DSBL);
959 iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), 1025 iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
960 priv->regs + TSI721_DMAC_DSSZ(chnum)); 1026 regs + TSI721_DMAC_DSSZ);
961 1027
962 /* Clear interrupt bits */ 1028 /* Clear interrupt bits */
963 iowrite32(TSI721_DMAC_INT_ALL, 1029 iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
964 priv->regs + TSI721_DMAC_INT(chnum));
965 1030
966 ioread32(priv->regs + TSI721_DMAC_INT(chnum)); 1031 ioread32(regs + TSI721_DMAC_INT);
967 1032
968 /* Toggle DMA channel initialization */ 1033 /* Toggle DMA channel initialization */
969 iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum)); 1034 iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
970 ioread32(priv->regs + TSI721_DMAC_CTL(chnum)); 1035 ioread32(regs + TSI721_DMAC_CTL);
971 udelay(10); 1036 udelay(10);
972 1037
973 return 0; 1038 return 0;
974} 1039}
975 1040
976static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum) 1041static int tsi721_bdma_maint_free(struct tsi721_device *priv)
977{ 1042{
978 u32 ch_stat; 1043 u32 ch_stat;
1044 struct tsi721_bdma_maint *mdma = &priv->mdma;
1045 void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
979 1046
980 if (priv->bdma[chnum].bd_base == NULL) 1047 if (mdma->bd_base == NULL)
981 return 0; 1048 return 0;
982 1049
983 /* Check if DMA channel still running */ 1050 /* Check if DMA channel still running */
984 ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum)); 1051 ch_stat = ioread32(regs + TSI721_DMAC_STS);
985 if (ch_stat & TSI721_DMAC_STS_RUN) 1052 if (ch_stat & TSI721_DMAC_STS_RUN)
986 return -EFAULT; 1053 return -EFAULT;
987 1054
988 /* Put DMA channel into init state */ 1055 /* Put DMA channel into init state */
989 iowrite32(TSI721_DMAC_CTL_INIT, 1056 iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
990 priv->regs + TSI721_DMAC_CTL(chnum));
991 1057
992 /* Free space allocated for DMA descriptors */ 1058 /* Free space allocated for DMA descriptors */
993 dma_free_coherent(&priv->pdev->dev, 1059 dma_free_coherent(&priv->pdev->dev,
994 priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc), 1060 mdma->bd_num * sizeof(struct tsi721_dma_desc),
995 priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys); 1061 mdma->bd_base, mdma->bd_phys);
996 priv->bdma[chnum].bd_base = NULL; 1062 mdma->bd_base = NULL;
997 1063
998 /* Free space allocated for status FIFO */ 1064 /* Free space allocated for status FIFO */
999 dma_free_coherent(&priv->pdev->dev, 1065 dma_free_coherent(&priv->pdev->dev,
1000 priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts), 1066 mdma->sts_size * sizeof(struct tsi721_dma_sts),
1001 priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys); 1067 mdma->sts_base, mdma->sts_phys);
1002 priv->bdma[chnum].sts_base = NULL; 1068 mdma->sts_base = NULL;
1003 return 0;
1004}
1005
1006static int tsi721_bdma_init(struct tsi721_device *priv)
1007{
1008 /* Initialize BDMA channel allocated for RapidIO maintenance read/write
1009 * request generation
1010 */
1011 priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
1012 if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
1013 dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
1014 " channel %d, aborting\n", TSI721_DMACH_MAINT);
1015 return -ENOMEM;
1016 }
1017
1018 return 0; 1069 return 0;
1019} 1070}
1020 1071
1021static void tsi721_bdma_free(struct tsi721_device *priv)
1022{
1023 tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
1024}
1025
1026/* Enable Inbound Messaging Interrupts */ 1072/* Enable Inbound Messaging Interrupts */
1027static void 1073static void
1028tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, 1074tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
2035 2081
2036 /* Disable all BDMA Channel interrupts */ 2082 /* Disable all BDMA Channel interrupts */
2037 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) 2083 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
2038 iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch)); 2084 iowrite32(0,
2085 priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
2039 2086
2040 /* Disable all general BDMA interrupts */ 2087 /* Disable all general BDMA interrupts */
2041 iowrite32(0, priv->regs + TSI721_BDMA_INTE); 2088 iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2104 mport->phy_type = RIO_PHY_SERIAL; 2151 mport->phy_type = RIO_PHY_SERIAL;
2105 mport->priv = (void *)priv; 2152 mport->priv = (void *)priv;
2106 mport->phys_efptr = 0x100; 2153 mport->phys_efptr = 0x100;
2154 priv->mport = mport;
2107 2155
2108 INIT_LIST_HEAD(&mport->dbells); 2156 INIT_LIST_HEAD(&mport->dbells);
2109 2157
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2129 if (!err) { 2177 if (!err) {
2130 tsi721_interrupts_init(priv); 2178 tsi721_interrupts_init(priv);
2131 ops->pwenable = tsi721_pw_enable; 2179 ops->pwenable = tsi721_pw_enable;
2132 } else 2180 } else {
2133 dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " 2181 dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
2134 "vector %02X err=0x%x\n", pdev->irq, err); 2182 "vector %02X err=0x%x\n", pdev->irq, err);
2183 goto err_exit;
2184 }
2135 2185
2186#ifdef CONFIG_RAPIDIO_DMA_ENGINE
2187 tsi721_register_dma(priv);
2188#endif
2136 /* Enable SRIO link */ 2189 /* Enable SRIO link */
2137 iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | 2190 iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
2138 TSI721_DEVCTL_SRBOOT_CMPL, 2191 TSI721_DEVCTL_SRBOOT_CMPL,
2139 priv->regs + TSI721_DEVCTL); 2192 priv->regs + TSI721_DEVCTL);
2140 2193
2141 rio_register_mport(mport); 2194 rio_register_mport(mport);
2142 priv->mport = mport;
2143 2195
2144 if (mport->host_deviceid >= 0) 2196 if (mport->host_deviceid >= 0)
2145 iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | 2197 iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2149 iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); 2201 iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2150 2202
2151 return 0; 2203 return 0;
2204
2205err_exit:
2206 kfree(mport);
2207 kfree(ops);
2208 return err;
2152} 2209}
2153 2210
2154static int __devinit tsi721_probe(struct pci_dev *pdev, 2211static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2294 tsi721_init_pc2sr_mapping(priv); 2351 tsi721_init_pc2sr_mapping(priv);
2295 tsi721_init_sr2pc_mapping(priv); 2352 tsi721_init_sr2pc_mapping(priv);
2296 2353
2297 if (tsi721_bdma_init(priv)) { 2354 if (tsi721_bdma_maint_init(priv)) {
2298 dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); 2355 dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
2299 err = -ENOMEM; 2356 err = -ENOMEM;
2300 goto err_unmap_bars; 2357 goto err_unmap_bars;
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2319err_free_consistent: 2376err_free_consistent:
2320 tsi721_doorbell_free(priv); 2377 tsi721_doorbell_free(priv);
2321err_free_bdma: 2378err_free_bdma:
2322 tsi721_bdma_free(priv); 2379 tsi721_bdma_maint_free(priv);
2323err_unmap_bars: 2380err_unmap_bars:
2324 if (priv->regs) 2381 if (priv->regs)
2325 iounmap(priv->regs); 2382 iounmap(priv->regs);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 1c226b31af13..59de9d7be346 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -167,6 +167,8 @@
167#define TSI721_DEV_INTE 0x29840 167#define TSI721_DEV_INTE 0x29840
168#define TSI721_DEV_INT 0x29844 168#define TSI721_DEV_INT 0x29844
169#define TSI721_DEV_INTSET 0x29848 169#define TSI721_DEV_INTSET 0x29848
170#define TSI721_DEV_INT_BDMA_CH 0x00002000
171#define TSI721_DEV_INT_BDMA_NCH 0x00001000
170#define TSI721_DEV_INT_SMSG_CH 0x00000800 172#define TSI721_DEV_INT_SMSG_CH 0x00000800
171#define TSI721_DEV_INT_SMSG_NCH 0x00000400 173#define TSI721_DEV_INT_SMSG_NCH 0x00000400
172#define TSI721_DEV_INT_SR2PC_CH 0x00000200 174#define TSI721_DEV_INT_SR2PC_CH 0x00000200
@@ -181,6 +183,8 @@
181#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x))) 183#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x)))
182#define TSI721_INT_OMSG_CHAN_M 0x0000ff00 184#define TSI721_INT_OMSG_CHAN_M 0x0000ff00
183#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x))) 185#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x)))
186#define TSI721_INT_BDMA_CHAN_M 0x000000ff
187#define TSI721_INT_BDMA_CHAN(x) (1 << (x))
184 188
185/* 189/*
186 * PC2SR block registers 190 * PC2SR block registers
@@ -235,14 +239,16 @@
235 * x = 0..7 239 * x = 0..7
236 */ 240 */
237 241
238#define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000) 242#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000)
239#define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000)
240 243
241#define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000) 244#define TSI721_DMAC_DWRCNT 0x000
245#define TSI721_DMAC_DRDCNT 0x004
246
247#define TSI721_DMAC_CTL 0x008
242#define TSI721_DMAC_CTL_SUSP 0x00000002 248#define TSI721_DMAC_CTL_SUSP 0x00000002
243#define TSI721_DMAC_CTL_INIT 0x00000001 249#define TSI721_DMAC_CTL_INIT 0x00000001
244 250
245#define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000) 251#define TSI721_DMAC_INT 0x00c
246#define TSI721_DMAC_INT_STFULL 0x00000010 252#define TSI721_DMAC_INT_STFULL 0x00000010
247#define TSI721_DMAC_INT_DONE 0x00000008 253#define TSI721_DMAC_INT_DONE 0x00000008
248#define TSI721_DMAC_INT_SUSP 0x00000004 254#define TSI721_DMAC_INT_SUSP 0x00000004
@@ -250,34 +256,33 @@
250#define TSI721_DMAC_INT_IOFDONE 0x00000001 256#define TSI721_DMAC_INT_IOFDONE 0x00000001
251#define TSI721_DMAC_INT_ALL 0x0000001f 257#define TSI721_DMAC_INT_ALL 0x0000001f
252 258
253#define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000) 259#define TSI721_DMAC_INTSET 0x010
254 260
255#define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000) 261#define TSI721_DMAC_STS 0x014
256#define TSI721_DMAC_STS_ABORT 0x00400000 262#define TSI721_DMAC_STS_ABORT 0x00400000
257#define TSI721_DMAC_STS_RUN 0x00200000 263#define TSI721_DMAC_STS_RUN 0x00200000
258#define TSI721_DMAC_STS_CS 0x001f0000 264#define TSI721_DMAC_STS_CS 0x001f0000
259 265
260#define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000) 266#define TSI721_DMAC_INTE 0x018
261 267
262#define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000) 268#define TSI721_DMAC_DPTRL 0x024
263#define TSI721_DMAC_DPTRL_MASK 0xffffffe0 269#define TSI721_DMAC_DPTRL_MASK 0xffffffe0
264 270
265#define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000) 271#define TSI721_DMAC_DPTRH 0x028
266 272
267#define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000) 273#define TSI721_DMAC_DSBL 0x02c
268#define TSI721_DMAC_DSBL_MASK 0xffffffc0 274#define TSI721_DMAC_DSBL_MASK 0xffffffc0
269 275
270#define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000) 276#define TSI721_DMAC_DSBH 0x030
271 277
272#define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000) 278#define TSI721_DMAC_DSSZ 0x034
273#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f 279#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f
274#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4) 280#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4)
275 281
276 282#define TSI721_DMAC_DSRP 0x038
277#define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000)
278#define TSI721_DMAC_DSRP_MASK 0x0007ffff 283#define TSI721_DMAC_DSRP_MASK 0x0007ffff
279 284
280#define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000) 285#define TSI721_DMAC_DSWP 0x03c
281#define TSI721_DMAC_DSWP_MASK 0x0007ffff 286#define TSI721_DMAC_DSWP_MASK 0x0007ffff
282 287
283#define TSI721_BDMA_INTE 0x5f000 288#define TSI721_BDMA_INTE 0x5f000
@@ -612,6 +617,8 @@ enum dma_rtype {
612#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ 617#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
613#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ 618#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
614 619
620#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
621
615#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0) 622#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0)
616 623
617enum tsi721_smsg_int_flag { 624enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag {
626 633
627/* Structures */ 634/* Structures */
628 635
636#ifdef CONFIG_RAPIDIO_DMA_ENGINE
637
638struct tsi721_tx_desc {
639 struct dma_async_tx_descriptor txd;
640 struct tsi721_dma_desc *hw_desc;
641 u16 destid;
642 /* low 64-bits of 66-bit RIO address */
643 u64 rio_addr;
644 /* upper 2-bits of 66-bit RIO address */
645 u8 rio_addr_u;
646 bool interrupt;
647 struct list_head desc_node;
648 struct list_head tx_list;
649};
650
629struct tsi721_bdma_chan { 651struct tsi721_bdma_chan {
652 int id;
653 void __iomem *regs;
654 int bd_num; /* number of buffer descriptors */
655 void *bd_base; /* start of DMA descriptors */
656 dma_addr_t bd_phys;
657 void *sts_base; /* start of DMA BD status FIFO */
658 dma_addr_t sts_phys;
659 int sts_size;
660 u32 sts_rdptr;
661 u32 wr_count;
662 u32 wr_count_next;
663
664 struct dma_chan dchan;
665 struct tsi721_tx_desc *tx_desc;
666 spinlock_t lock;
667 struct list_head active_list;
668 struct list_head queue;
669 struct list_head free_list;
670 dma_cookie_t completed_cookie;
671 struct tasklet_struct tasklet;
672};
673
674#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
675
676struct tsi721_bdma_maint {
677 int ch_id; /* BDMA channel number */
630 int bd_num; /* number of buffer descriptors */ 678 int bd_num; /* number of buffer descriptors */
631 void *bd_base; /* start of DMA descriptors */ 679 void *bd_base; /* start of DMA descriptors */
632 dma_addr_t bd_phys; 680 dma_addr_t bd_phys;
@@ -721,6 +769,24 @@ enum tsi721_msix_vect {
721 TSI721_VECT_IMB1_INT, 769 TSI721_VECT_IMB1_INT,
722 TSI721_VECT_IMB2_INT, 770 TSI721_VECT_IMB2_INT,
723 TSI721_VECT_IMB3_INT, 771 TSI721_VECT_IMB3_INT,
772#ifdef CONFIG_RAPIDIO_DMA_ENGINE
773 TSI721_VECT_DMA0_DONE,
774 TSI721_VECT_DMA1_DONE,
775 TSI721_VECT_DMA2_DONE,
776 TSI721_VECT_DMA3_DONE,
777 TSI721_VECT_DMA4_DONE,
778 TSI721_VECT_DMA5_DONE,
779 TSI721_VECT_DMA6_DONE,
780 TSI721_VECT_DMA7_DONE,
781 TSI721_VECT_DMA0_INT,
782 TSI721_VECT_DMA1_INT,
783 TSI721_VECT_DMA2_INT,
784 TSI721_VECT_DMA3_INT,
785 TSI721_VECT_DMA4_INT,
786 TSI721_VECT_DMA5_INT,
787 TSI721_VECT_DMA6_INT,
788 TSI721_VECT_DMA7_INT,
789#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
724 TSI721_VECT_MAX 790 TSI721_VECT_MAX
725}; 791};
726 792
@@ -754,7 +820,11 @@ struct tsi721_device {
754 u32 pw_discard_count; 820 u32 pw_discard_count;
755 821
756 /* BDMA Engine */ 822 /* BDMA Engine */
823 struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
824
825#ifdef CONFIG_RAPIDIO_DMA_ENGINE
757 struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; 826 struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
827#endif
758 828
759 /* Inbound Messaging */ 829 /* Inbound Messaging */
760 int imsg_init[TSI721_IMSG_CHNUM]; 830 int imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@ struct tsi721_device {
765 struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; 835 struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
766}; 836};
767 837
838#ifdef CONFIG_RAPIDIO_DMA_ENGINE
839extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
840extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
841#endif
842
768#endif 843#endif
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 000000000000..92e06a5c62ec
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,823 @@
1/*
2 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
3 *
4 * Copyright 2011 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/io.h>
23#include <linux/errno.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/rio.h>
30#include <linux/rio_drv.h>
31#include <linux/dma-mapping.h>
32#include <linux/interrupt.h>
33#include <linux/kfifo.h>
34#include <linux/delay.h>
35
36#include "tsi721.h"
37
38static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
39{
40 return container_of(chan, struct tsi721_bdma_chan, dchan);
41}
42
43static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
44{
45 return container_of(ddev, struct rio_mport, dma)->priv;
46}
47
48static inline
49struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
50{
51 return container_of(txd, struct tsi721_tx_desc, txd);
52}
53
54static inline
55struct tsi721_tx_desc *tsi721_dma_first_active(
56 struct tsi721_bdma_chan *bdma_chan)
57{
58 return list_first_entry(&bdma_chan->active_list,
59 struct tsi721_tx_desc, desc_node);
60}
61
62static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
63{
64 struct tsi721_dma_desc *bd_ptr;
65 struct device *dev = bdma_chan->dchan.device->dev;
66 u64 *sts_ptr;
67 dma_addr_t bd_phys;
68 dma_addr_t sts_phys;
69 int sts_size;
70 int bd_num = bdma_chan->bd_num;
71
72 dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
73
74 /* Allocate space for DMA descriptors */
75 bd_ptr = dma_zalloc_coherent(dev,
76 bd_num * sizeof(struct tsi721_dma_desc),
77 &bd_phys, GFP_KERNEL);
78 if (!bd_ptr)
79 return -ENOMEM;
80
81 bdma_chan->bd_phys = bd_phys;
82 bdma_chan->bd_base = bd_ptr;
83
84 dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
85 bd_ptr, (unsigned long long)bd_phys);
86
87 /* Allocate space for descriptor status FIFO */
88 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
89 bd_num : TSI721_DMA_MINSTSSZ;
90 sts_size = roundup_pow_of_two(sts_size);
91 sts_ptr = dma_zalloc_coherent(dev,
92 sts_size * sizeof(struct tsi721_dma_sts),
93 &sts_phys, GFP_KERNEL);
94 if (!sts_ptr) {
95 /* Free space allocated for DMA descriptors */
96 dma_free_coherent(dev,
97 bd_num * sizeof(struct tsi721_dma_desc),
98 bd_ptr, bd_phys);
99 bdma_chan->bd_base = NULL;
100 return -ENOMEM;
101 }
102
103 bdma_chan->sts_phys = sts_phys;
104 bdma_chan->sts_base = sts_ptr;
105 bdma_chan->sts_size = sts_size;
106
107 dev_dbg(dev,
108 "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
109 sts_ptr, (unsigned long long)sts_phys, sts_size);
110
111 /* Initialize DMA descriptors ring */
112 bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
113 bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
114 TSI721_DMAC_DPTRL_MASK);
115 bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
116
117 /* Setup DMA descriptor pointers */
118 iowrite32(((u64)bd_phys >> 32),
119 bdma_chan->regs + TSI721_DMAC_DPTRH);
120 iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
121 bdma_chan->regs + TSI721_DMAC_DPTRL);
122
123 /* Setup descriptor status FIFO */
124 iowrite32(((u64)sts_phys >> 32),
125 bdma_chan->regs + TSI721_DMAC_DSBH);
126 iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
127 bdma_chan->regs + TSI721_DMAC_DSBL);
128 iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
129 bdma_chan->regs + TSI721_DMAC_DSSZ);
130
131 /* Clear interrupt bits */
132 iowrite32(TSI721_DMAC_INT_ALL,
133 bdma_chan->regs + TSI721_DMAC_INT);
134
135 ioread32(bdma_chan->regs + TSI721_DMAC_INT);
136
137 /* Toggle DMA channel initialization */
138 iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
139 ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
140 bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
141 bdma_chan->sts_rdptr = 0;
142 udelay(10);
143
144 return 0;
145}
146
147static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
148{
149 u32 ch_stat;
150
151 if (bdma_chan->bd_base == NULL)
152 return 0;
153
154 /* Check if DMA channel still running */
155 ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
156 if (ch_stat & TSI721_DMAC_STS_RUN)
157 return -EFAULT;
158
159 /* Put DMA channel into init state */
160 iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
161
162 /* Free space allocated for DMA descriptors */
163 dma_free_coherent(bdma_chan->dchan.device->dev,
164 bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
165 bdma_chan->bd_base, bdma_chan->bd_phys);
166 bdma_chan->bd_base = NULL;
167
168 /* Free space allocated for status FIFO */
169 dma_free_coherent(bdma_chan->dchan.device->dev,
170 bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
171 bdma_chan->sts_base, bdma_chan->sts_phys);
172 bdma_chan->sts_base = NULL;
173 return 0;
174}
175
176static void
177tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
178{
179 if (enable) {
180 /* Clear pending BDMA channel interrupts */
181 iowrite32(TSI721_DMAC_INT_ALL,
182 bdma_chan->regs + TSI721_DMAC_INT);
183 ioread32(bdma_chan->regs + TSI721_DMAC_INT);
184 /* Enable BDMA channel interrupts */
185 iowrite32(TSI721_DMAC_INT_ALL,
186 bdma_chan->regs + TSI721_DMAC_INTE);
187 } else {
188 /* Disable BDMA channel interrupts */
189 iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
190 /* Clear pending BDMA channel interrupts */
191 iowrite32(TSI721_DMAC_INT_ALL,
192 bdma_chan->regs + TSI721_DMAC_INT);
193 }
194
195}
196
197static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
198{
199 u32 sts;
200
201 sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
202 return ((sts & TSI721_DMAC_STS_RUN) == 0);
203}
204
205void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
206{
207 /* Disable BDMA channel interrupts */
208 iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
209
210 tasklet_schedule(&bdma_chan->tasklet);
211}
212
213#ifdef CONFIG_PCI_MSI
214/**
215 * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels
216 * @irq: Linux interrupt number
217 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
218 *
219 * Handles BDMA channel interrupts signaled using MSI-X.
220 */
221static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
222{
223 struct tsi721_bdma_chan *bdma_chan = ptr;
224
225 tsi721_bdma_handler(bdma_chan);
226 return IRQ_HANDLED;
227}
228#endif /* CONFIG_PCI_MSI */
229
230/* Must be called with the spinlock held */
231static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
232{
233 if (!tsi721_dma_is_idle(bdma_chan)) {
234 dev_err(bdma_chan->dchan.device->dev,
235 "BUG: Attempt to start non-idle channel\n");
236 return;
237 }
238
239 if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
240 dev_err(bdma_chan->dchan.device->dev,
241 "BUG: Attempt to start DMA with no BDs ready\n");
242 return;
243 }
244
245 dev_dbg(bdma_chan->dchan.device->dev,
246 "tx_chan: %p, chan: %d, regs: %p\n",
247 bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
248
249 iowrite32(bdma_chan->wr_count_next,
250 bdma_chan->regs + TSI721_DMAC_DWRCNT);
251 ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
252
253 bdma_chan->wr_count = bdma_chan->wr_count_next;
254}
255
256static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
257 struct tsi721_tx_desc *desc)
258{
259 dev_dbg(bdma_chan->dchan.device->dev,
260 "Put desc: %p into free list\n", desc);
261
262 if (desc) {
263 spin_lock_bh(&bdma_chan->lock);
264 list_splice_init(&desc->tx_list, &bdma_chan->free_list);
265 list_add(&desc->desc_node, &bdma_chan->free_list);
266 bdma_chan->wr_count_next = bdma_chan->wr_count;
267 spin_unlock_bh(&bdma_chan->lock);
268 }
269}
270
271static
272struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
273{
274 struct tsi721_tx_desc *tx_desc, *_tx_desc;
275 struct tsi721_tx_desc *ret = NULL;
276 int i;
277
278 spin_lock_bh(&bdma_chan->lock);
279 list_for_each_entry_safe(tx_desc, _tx_desc,
280 &bdma_chan->free_list, desc_node) {
281 if (async_tx_test_ack(&tx_desc->txd)) {
282 list_del(&tx_desc->desc_node);
283 ret = tx_desc;
284 break;
285 }
286 dev_dbg(bdma_chan->dchan.device->dev,
287 "desc %p not ACKed\n", tx_desc);
288 }
289
290 i = bdma_chan->wr_count_next % bdma_chan->bd_num;
291 if (i == bdma_chan->bd_num - 1) {
292 i = 0;
293 bdma_chan->wr_count_next++; /* skip link descriptor */
294 }
295
296 bdma_chan->wr_count_next++;
297 tx_desc->txd.phys = bdma_chan->bd_phys +
298 i * sizeof(struct tsi721_dma_desc);
299 tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
300
301 spin_unlock_bh(&bdma_chan->lock);
302
303 return ret;
304}
305
306static int
307tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
308 struct tsi721_tx_desc *desc, struct scatterlist *sg,
309 enum dma_rtype rtype, u32 sys_size)
310{
311 struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
312 u64 rio_addr;
313
314 if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
315 dev_err(bdma_chan->dchan.device->dev,
316 "SG element is too large\n");
317 return -EINVAL;
318 }
319
320 dev_dbg(bdma_chan->dchan.device->dev,
321 "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
322 (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
323 sg_dma_len(sg));
324
325 dev_dbg(bdma_chan->dchan.device->dev,
326 "bd_ptr = %p did=%d raddr=0x%llx\n",
327 bd_ptr, desc->destid, desc->rio_addr);
328
329 /* Initialize DMA descriptor */
330 bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
331 (rtype << 19) | desc->destid);
332 if (desc->interrupt)
333 bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
334 bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
335 (sys_size << 26) | sg_dma_len(sg));
336 rio_addr = (desc->rio_addr >> 2) |
337 ((u64)(desc->rio_addr_u & 0x3) << 62);
338 bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
339 bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
340 bd_ptr->t1.bufptr_lo = cpu_to_le32(
341 (u64)sg_dma_address(sg) & 0xffffffff);
342 bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
343 bd_ptr->t1.s_dist = 0;
344 bd_ptr->t1.s_size = 0;
345
346 return 0;
347}
348
349static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
350 struct tsi721_tx_desc *desc)
351{
352 struct dma_async_tx_descriptor *txd = &desc->txd;
353 dma_async_tx_callback callback = txd->callback;
354 void *param = txd->callback_param;
355
356 list_splice_init(&desc->tx_list, &bdma_chan->free_list);
357 list_move(&desc->desc_node, &bdma_chan->free_list);
358 bdma_chan->completed_cookie = txd->cookie;
359
360 if (callback)
361 callback(param);
362}
363
364static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
365{
366 struct tsi721_tx_desc *desc, *_d;
367 LIST_HEAD(list);
368
369 BUG_ON(!tsi721_dma_is_idle(bdma_chan));
370
371 if (!list_empty(&bdma_chan->queue))
372 tsi721_start_dma(bdma_chan);
373
374 list_splice_init(&bdma_chan->active_list, &list);
375 list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
376
377 list_for_each_entry_safe(desc, _d, &list, desc_node)
378 tsi721_dma_chain_complete(bdma_chan, desc);
379}
380
381static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
382{
383 u32 srd_ptr;
384 u64 *sts_ptr;
385 int i, j;
386
387 /* Check and clear descriptor status FIFO entries */
388 srd_ptr = bdma_chan->sts_rdptr;
389 sts_ptr = bdma_chan->sts_base;
390 j = srd_ptr * 8;
391 while (sts_ptr[j]) {
392 for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
393 sts_ptr[j] = 0;
394
395 ++srd_ptr;
396 srd_ptr %= bdma_chan->sts_size;
397 j = srd_ptr * 8;
398 }
399
400 iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
401 bdma_chan->sts_rdptr = srd_ptr;
402}
403
404static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
405{
406 if (list_empty(&bdma_chan->active_list) ||
407 list_is_singular(&bdma_chan->active_list)) {
408 dev_dbg(bdma_chan->dchan.device->dev,
409 "%s: Active_list empty\n", __func__);
410 tsi721_dma_complete_all(bdma_chan);
411 } else {
412 dev_dbg(bdma_chan->dchan.device->dev,
413 "%s: Active_list NOT empty\n", __func__);
414 tsi721_dma_chain_complete(bdma_chan,
415 tsi721_dma_first_active(bdma_chan));
416 tsi721_start_dma(bdma_chan);
417 }
418}
419
420static void tsi721_dma_tasklet(unsigned long data)
421{
422 struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
423 u32 dmac_int, dmac_sts;
424
425 dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
426 dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
427 __func__, bdma_chan->id, dmac_int);
428 /* Clear channel interrupts */
429 iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
430
431 if (dmac_int & TSI721_DMAC_INT_ERR) {
432 dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
433 dev_err(bdma_chan->dchan.device->dev,
434 "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
435 __func__, bdma_chan->id, dmac_sts);
436 }
437
438 if (dmac_int & TSI721_DMAC_INT_STFULL) {
439 dev_err(bdma_chan->dchan.device->dev,
440 "%s: DMAC%d descriptor status FIFO is full\n",
441 __func__, bdma_chan->id);
442 }
443
444 if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
445 tsi721_clr_stat(bdma_chan);
446 spin_lock(&bdma_chan->lock);
447 tsi721_advance_work(bdma_chan);
448 spin_unlock(&bdma_chan->lock);
449 }
450
451 /* Re-Enable BDMA channel interrupts */
452 iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
453}
454
455static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
456{
457 struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
458 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
459 dma_cookie_t cookie;
460
461 spin_lock_bh(&bdma_chan->lock);
462
463 cookie = txd->chan->cookie;
464 if (++cookie < 0)
465 cookie = 1;
466 txd->chan->cookie = cookie;
467 txd->cookie = cookie;
468
469 if (list_empty(&bdma_chan->active_list)) {
470 list_add_tail(&desc->desc_node, &bdma_chan->active_list);
471 tsi721_start_dma(bdma_chan);
472 } else {
473 list_add_tail(&desc->desc_node, &bdma_chan->queue);
474 }
475
476 spin_unlock_bh(&bdma_chan->lock);
477 return cookie;
478}
479
480static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
481{
482 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
483#ifdef CONFIG_PCI_MSI
484 struct tsi721_device *priv = to_tsi721(dchan->device);
485#endif
486 struct tsi721_tx_desc *desc = NULL;
487 LIST_HEAD(tmp_list);
488 int i;
489 int rc;
490
491 if (bdma_chan->bd_base)
492 return bdma_chan->bd_num - 1;
493
494 /* Initialize BDMA channel */
495 if (tsi721_bdma_ch_init(bdma_chan)) {
496 dev_err(dchan->device->dev, "Unable to initialize data DMA"
497 " channel %d, aborting\n", bdma_chan->id);
498 return -ENOMEM;
499 }
500
501 /* Alocate matching number of logical descriptors */
502 desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
503 GFP_KERNEL);
504 if (!desc) {
505 dev_err(dchan->device->dev,
506 "Failed to allocate logical descriptors\n");
507 rc = -ENOMEM;
508 goto err_out;
509 }
510
511 bdma_chan->tx_desc = desc;
512
513 for (i = 0; i < bdma_chan->bd_num - 1; i++) {
514 dma_async_tx_descriptor_init(&desc[i].txd, dchan);
515 desc[i].txd.tx_submit = tsi721_tx_submit;
516 desc[i].txd.flags = DMA_CTRL_ACK;
517 INIT_LIST_HEAD(&desc[i].tx_list);
518 list_add_tail(&desc[i].desc_node, &tmp_list);
519 }
520
521 spin_lock_bh(&bdma_chan->lock);
522 list_splice(&tmp_list, &bdma_chan->free_list);
523 bdma_chan->completed_cookie = dchan->cookie = 1;
524 spin_unlock_bh(&bdma_chan->lock);
525
526#ifdef CONFIG_PCI_MSI
527 if (priv->flags & TSI721_USING_MSIX) {
528 /* Request interrupt service if we are in MSI-X mode */
529 rc = request_irq(
530 priv->msix[TSI721_VECT_DMA0_DONE +
531 bdma_chan->id].vector,
532 tsi721_bdma_msix, 0,
533 priv->msix[TSI721_VECT_DMA0_DONE +
534 bdma_chan->id].irq_name,
535 (void *)bdma_chan);
536
537 if (rc) {
538 dev_dbg(dchan->device->dev,
539 "Unable to allocate MSI-X interrupt for "
540 "BDMA%d-DONE\n", bdma_chan->id);
541 goto err_out;
542 }
543
544 rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
545 bdma_chan->id].vector,
546 tsi721_bdma_msix, 0,
547 priv->msix[TSI721_VECT_DMA0_INT +
548 bdma_chan->id].irq_name,
549 (void *)bdma_chan);
550
551 if (rc) {
552 dev_dbg(dchan->device->dev,
553 "Unable to allocate MSI-X interrupt for "
554 "BDMA%d-INT\n", bdma_chan->id);
555 free_irq(
556 priv->msix[TSI721_VECT_DMA0_DONE +
557 bdma_chan->id].vector,
558 (void *)bdma_chan);
559 rc = -EIO;
560 goto err_out;
561 }
562 }
563#endif /* CONFIG_PCI_MSI */
564
565 tasklet_enable(&bdma_chan->tasklet);
566 tsi721_bdma_interrupt_enable(bdma_chan, 1);
567
568 return bdma_chan->bd_num - 1;
569
570err_out:
571 kfree(desc);
572 tsi721_bdma_ch_free(bdma_chan);
573 return rc;
574}
575
576static void tsi721_free_chan_resources(struct dma_chan *dchan)
577{
578 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
579#ifdef CONFIG_PCI_MSI
580 struct tsi721_device *priv = to_tsi721(dchan->device);
581#endif
582 LIST_HEAD(list);
583
584 dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
585
586 if (bdma_chan->bd_base == NULL)
587 return;
588
589 BUG_ON(!list_empty(&bdma_chan->active_list));
590 BUG_ON(!list_empty(&bdma_chan->queue));
591
592 tasklet_disable(&bdma_chan->tasklet);
593
594 spin_lock_bh(&bdma_chan->lock);
595 list_splice_init(&bdma_chan->free_list, &list);
596 spin_unlock_bh(&bdma_chan->lock);
597
598 tsi721_bdma_interrupt_enable(bdma_chan, 0);
599
600#ifdef CONFIG_PCI_MSI
601 if (priv->flags & TSI721_USING_MSIX) {
602 free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
603 bdma_chan->id].vector, (void *)bdma_chan);
604 free_irq(priv->msix[TSI721_VECT_DMA0_INT +
605 bdma_chan->id].vector, (void *)bdma_chan);
606 }
607#endif /* CONFIG_PCI_MSI */
608
609 tsi721_bdma_ch_free(bdma_chan);
610 kfree(bdma_chan->tx_desc);
611}
612
613static
614enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
615 struct dma_tx_state *txstate)
616{
617 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
618 dma_cookie_t last_used;
619 dma_cookie_t last_completed;
620 int ret;
621
622 spin_lock_bh(&bdma_chan->lock);
623 last_completed = bdma_chan->completed_cookie;
624 last_used = dchan->cookie;
625 spin_unlock_bh(&bdma_chan->lock);
626
627 ret = dma_async_is_complete(cookie, last_completed, last_used);
628
629 dma_set_tx_state(txstate, last_completed, last_used, 0);
630
631 dev_dbg(dchan->device->dev,
632 "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
633 __func__, ret, last_completed, last_used);
634
635 return ret;
636}
637
638static void tsi721_issue_pending(struct dma_chan *dchan)
639{
640 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
641
642 dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
643
644 if (tsi721_dma_is_idle(bdma_chan)) {
645 spin_lock_bh(&bdma_chan->lock);
646 tsi721_advance_work(bdma_chan);
647 spin_unlock_bh(&bdma_chan->lock);
648 } else
649 dev_dbg(dchan->device->dev,
650 "%s: DMA channel still busy\n", __func__);
651}
652
653static
654struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
655 struct scatterlist *sgl, unsigned int sg_len,
656 enum dma_transfer_direction dir, unsigned long flags,
657 void *tinfo)
658{
659 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
660 struct tsi721_tx_desc *desc = NULL;
661 struct tsi721_tx_desc *first = NULL;
662 struct scatterlist *sg;
663 struct rio_dma_ext *rext = tinfo;
664 u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
665 unsigned int i;
666 u32 sys_size = dma_to_mport(dchan->device)->sys_size;
667 enum dma_rtype rtype;
668
669 if (!sgl || !sg_len) {
670 dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
671 return NULL;
672 }
673
674 if (dir == DMA_DEV_TO_MEM)
675 rtype = NREAD;
676 else if (dir == DMA_MEM_TO_DEV) {
677 switch (rext->wr_type) {
678 case RDW_ALL_NWRITE:
679 rtype = ALL_NWRITE;
680 break;
681 case RDW_ALL_NWRITE_R:
682 rtype = ALL_NWRITE_R;
683 break;
684 case RDW_LAST_NWRITE_R:
685 default:
686 rtype = LAST_NWRITE_R;
687 break;
688 }
689 } else {
690 dev_err(dchan->device->dev,
691 "%s: Unsupported DMA direction option\n", __func__);
692 return NULL;
693 }
694
695 for_each_sg(sgl, sg, sg_len, i) {
696 int err;
697
698 dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
699 desc = tsi721_desc_get(bdma_chan);
700 if (!desc) {
701 dev_err(dchan->device->dev,
702 "Not enough descriptors available\n");
703 goto err_desc_get;
704 }
705
706 if (sg_is_last(sg))
707 desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
708 else
709 desc->interrupt = false;
710
711 desc->destid = rext->destid;
712 desc->rio_addr = rio_addr;
713 desc->rio_addr_u = 0;
714
715 err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
716 if (err) {
717 dev_err(dchan->device->dev,
718 "Failed to build desc: %d\n", err);
719 goto err_desc_get;
720 }
721
722 rio_addr += sg_dma_len(sg);
723
724 if (!first)
725 first = desc;
726 else
727 list_add_tail(&desc->desc_node, &first->tx_list);
728 }
729
730 first->txd.cookie = -EBUSY;
731 desc->txd.flags = flags;
732
733 return &first->txd;
734
735err_desc_get:
736 tsi721_desc_put(bdma_chan, first);
737 return NULL;
738}
739
740static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
741 unsigned long arg)
742{
743 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
744 struct tsi721_tx_desc *desc, *_d;
745 LIST_HEAD(list);
746
747 dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
748
749 if (cmd != DMA_TERMINATE_ALL)
750 return -ENXIO;
751
752 spin_lock_bh(&bdma_chan->lock);
753
754 /* make sure to stop the transfer */
755 iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
756
757 list_splice_init(&bdma_chan->active_list, &list);
758 list_splice_init(&bdma_chan->queue, &list);
759
760 list_for_each_entry_safe(desc, _d, &list, desc_node)
761 tsi721_dma_chain_complete(bdma_chan, desc);
762
763 spin_unlock_bh(&bdma_chan->lock);
764
765 return 0;
766}
767
768int __devinit tsi721_register_dma(struct tsi721_device *priv)
769{
770 int i;
771 int nr_channels = TSI721_DMA_MAXCH;
772 int err;
773 struct rio_mport *mport = priv->mport;
774
775 mport->dma.dev = &priv->pdev->dev;
776 mport->dma.chancnt = nr_channels;
777
778 INIT_LIST_HEAD(&mport->dma.channels);
779
780 for (i = 0; i < nr_channels; i++) {
781 struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
782
783 if (i == TSI721_DMACH_MAINT)
784 continue;
785
786 bdma_chan->bd_num = 64;
787 bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
788
789 bdma_chan->dchan.device = &mport->dma;
790 bdma_chan->dchan.cookie = 1;
791 bdma_chan->dchan.chan_id = i;
792 bdma_chan->id = i;
793
794 spin_lock_init(&bdma_chan->lock);
795
796 INIT_LIST_HEAD(&bdma_chan->active_list);
797 INIT_LIST_HEAD(&bdma_chan->queue);
798 INIT_LIST_HEAD(&bdma_chan->free_list);
799
800 tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
801 (unsigned long)bdma_chan);
802 tasklet_disable(&bdma_chan->tasklet);
803 list_add_tail(&bdma_chan->dchan.device_node,
804 &mport->dma.channels);
805 }
806
807 dma_cap_zero(mport->dma.cap_mask);
808 dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
809 dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
810
811 mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
812 mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
813 mport->dma.device_tx_status = tsi721_tx_status;
814 mport->dma.device_issue_pending = tsi721_issue_pending;
815 mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
816 mport->dma.device_control = tsi721_device_control;
817
818 err = dma_async_device_register(&mport->dma);
819 if (err)
820 dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
821
822 return err;
823}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 86c9a091a2ff..c40665a4fa33 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1121,6 +1121,87 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
1121 return 0; 1121 return 0;
1122} 1122}
1123 1123
1124#ifdef CONFIG_RAPIDIO_DMA_ENGINE
1125
1126static bool rio_chan_filter(struct dma_chan *chan, void *arg)
1127{
1128 struct rio_dev *rdev = arg;
1129
1130 /* Check that DMA device belongs to the right MPORT */
1131 return (rdev->net->hport ==
1132 container_of(chan->device, struct rio_mport, dma));
1133}
1134
1135/**
1136 * rio_request_dma - request RapidIO capable DMA channel that supports
1137 * specified target RapidIO device.
1138 * @rdev: RIO device control structure
1139 *
1140 * Returns pointer to allocated DMA channel or NULL if failed.
1141 */
1142struct dma_chan *rio_request_dma(struct rio_dev *rdev)
1143{
1144 dma_cap_mask_t mask;
1145 struct dma_chan *dchan;
1146
1147 dma_cap_zero(mask);
1148 dma_cap_set(DMA_SLAVE, mask);
1149 dchan = dma_request_channel(mask, rio_chan_filter, rdev);
1150
1151 return dchan;
1152}
1153EXPORT_SYMBOL_GPL(rio_request_dma);
1154
1155/**
1156 * rio_release_dma - release specified DMA channel
1157 * @dchan: DMA channel to release
1158 */
1159void rio_release_dma(struct dma_chan *dchan)
1160{
1161 dma_release_channel(dchan);
1162}
1163EXPORT_SYMBOL_GPL(rio_release_dma);
1164
1165/**
1166 * rio_dma_prep_slave_sg - RapidIO specific wrapper
1167 * for device_prep_slave_sg callback defined by DMAENGINE.
1168 * @rdev: RIO device control structure
1169 * @dchan: DMA channel to configure
1170 * @data: RIO specific data descriptor
1171 * @direction: DMA data transfer direction (TO or FROM the device)
1172 * @flags: dmaengine defined flags
1173 *
1174 * Initializes RapidIO capable DMA channel for the specified data transfer.
1175 * Uses DMA channel private extension to pass information related to remote
1176 * target RIO device.
1177 * Returns pointer to DMA transaction descriptor or NULL if failed.
1178 */
1179struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
1180 struct dma_chan *dchan, struct rio_dma_data *data,
1181 enum dma_transfer_direction direction, unsigned long flags)
1182{
1183 struct dma_async_tx_descriptor *txd = NULL;
1184 struct rio_dma_ext rio_ext;
1185
1186 if (dchan->device->device_prep_slave_sg == NULL) {
1187 pr_err("%s: prep_rio_sg == NULL\n", __func__);
1188 return NULL;
1189 }
1190
1191 rio_ext.destid = rdev->destid;
1192 rio_ext.rio_addr_u = data->rio_addr_u;
1193 rio_ext.rio_addr = data->rio_addr;
1194 rio_ext.wr_type = data->wr_type;
1195
1196 txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
1197 direction, flags, &rio_ext);
1198
1199 return txd;
1200}
1201EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
1202
1203#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
1204
1124static void rio_fixup_device(struct rio_dev *dev) 1205static void rio_fixup_device(struct rio_dev *dev)
1125{ 1206{
1126} 1207}
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index e1b8c54ace5a..a739f5ca936a 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -794,17 +794,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
794} 794}
795 795
796static struct of_regulator_match ab8500_regulator_matches[] = { 796static struct of_regulator_match ab8500_regulator_matches[] = {
797 { .name = "LDO-AUX1", .driver_data = (void *) AB8500_LDO_AUX1, }, 797 { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
798 { .name = "LDO-AUX2", .driver_data = (void *) AB8500_LDO_AUX2, }, 798 { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
799 { .name = "LDO-AUX3", .driver_data = (void *) AB8500_LDO_AUX3, }, 799 { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, },
800 { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, }, 800 { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
801 { .name = "LDO-TVOUT", .driver_data = (void *) AB8500_LDO_TVOUT, }, 801 { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, },
802 { .name = "LDO-USB", .driver_data = (void *) AB8500_LDO_USB, }, 802 { .name = "ab8500_ldo_usb", .driver_data = (void *) AB8500_LDO_USB, },
803 { .name = "LDO-AUDIO", .driver_data = (void *) AB8500_LDO_AUDIO, }, 803 { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, },
804 { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, }, 804 { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
805 { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, }, 805 { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
806 { .name = "LDO-DMIC", .driver_data = (void *) AB8500_LDO_DMIC, }, 806 { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, },
807 { .name = "LDO-ANA", .driver_data = (void *) AB8500_LDO_ANA, }, 807 { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
808}; 808};
809 809
810static __devinit int 810static __devinit int
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3660bace123c..e82e7eaac0f1 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = {
224 .of_match_table = of_anatop_regulator_match_tbl, 224 .of_match_table = of_anatop_regulator_match_tbl,
225 }, 225 },
226 .probe = anatop_regulator_probe, 226 .probe = anatop_regulator_probe,
227 .remove = anatop_regulator_remove, 227 .remove = __devexit_p(anatop_regulator_remove),
228}; 228};
229 229
230static int __init anatop_regulator_init(void) 230static int __init anatop_regulator_init(void)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7584a74eec8a..09a737c868b5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
2050 return -EINVAL; 2050 return -EINVAL;
2051 } 2051 }
2052 2052
2053 if (min_uV < rdev->desc->min_uV)
2054 min_uV = rdev->desc->min_uV;
2055
2053 ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); 2056 ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
2054 if (ret < 0) 2057 if (ret < 0)
2055 return ret; 2058 return ret;
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 968f97f3cb3d..9dbb491b6efa 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev,
452} 452}
453 453
454static struct of_regulator_match db8500_regulator_matches[] = { 454static struct of_regulator_match db8500_regulator_matches[] = {
455 { .name = "db8500-vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, }, 455 { .name = "db8500_vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
456 { .name = "db8500-varm", .driver_data = (void *) DB8500_REGULATOR_VARM, }, 456 { .name = "db8500_varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
457 { .name = "db8500-vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, }, 457 { .name = "db8500_vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
458 { .name = "db8500-vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, }, 458 { .name = "db8500_vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
459 { .name = "db8500-vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, }, 459 { .name = "db8500_vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
460 { .name = "db8500-vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, }, 460 { .name = "db8500_vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
461 { .name = "db8500-vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, }, 461 { .name = "db8500_vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
462 { .name = "db8500-vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, }, 462 { .name = "db8500_vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
463 { .name = "db8500-sva-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, }, 463 { .name = "db8500_sva_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
464 { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, }, 464 { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
465 { .name = "db8500-sva-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, }, 465 { .name = "db8500_sva_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
466 { .name = "db8500-sia-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, }, 466 { .name = "db8500_sia_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
467 { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, }, 467 { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
468 { .name = "db8500-sia-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, }, 468 { .name = "db8500_sia_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
469 { .name = "db8500-sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, }, 469 { .name = "db8500_sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
470 { .name = "db8500-b2r2-mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, }, 470 { .name = "db8500_b2r2_mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
471 { .name = "db8500-esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, }, 471 { .name = "db8500_esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
472 { .name = "db8500-esram12-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, }, 472 { .name = "db8500_esram12_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
473 { .name = "db8500-esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, }, 473 { .name = "db8500_esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
474 { .name = "db8500-esram34-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, }, 474 { .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
475}; 475};
476 476
477static __devinit int 477static __devinit int
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 9997d7aaca84..242851a4c1a6 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
101} 101}
102 102
103static int gpio_regulator_set_value(struct regulator_dev *dev, 103static int gpio_regulator_set_value(struct regulator_dev *dev,
104 int min, int max) 104 int min, int max, unsigned *selector)
105{ 105{
106 struct gpio_regulator_data *data = rdev_get_drvdata(dev); 106 struct gpio_regulator_data *data = rdev_get_drvdata(dev);
107 int ptr, target, state, best_val = INT_MAX; 107 int ptr, target = 0, state, best_val = INT_MAX;
108 108
109 for (ptr = 0; ptr < data->nr_states; ptr++) 109 for (ptr = 0; ptr < data->nr_states; ptr++)
110 if (data->states[ptr].value < best_val && 110 if (data->states[ptr].value < best_val &&
111 data->states[ptr].value >= min && 111 data->states[ptr].value >= min &&
112 data->states[ptr].value <= max) 112 data->states[ptr].value <= max) {
113 target = data->states[ptr].gpios; 113 target = data->states[ptr].gpios;
114 best_val = data->states[ptr].value;
115 if (selector)
116 *selector = ptr;
117 }
114 118
115 if (best_val == INT_MAX) 119 if (best_val == INT_MAX)
116 return -EINVAL; 120 return -EINVAL;
@@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
128 int min_uV, int max_uV, 132 int min_uV, int max_uV,
129 unsigned *selector) 133 unsigned *selector)
130{ 134{
131 return gpio_regulator_set_value(dev, min_uV, max_uV); 135 return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
132} 136}
133 137
134static int gpio_regulator_list_voltage(struct regulator_dev *dev, 138static int gpio_regulator_list_voltage(struct regulator_dev *dev,
@@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
145static int gpio_regulator_set_current_limit(struct regulator_dev *dev, 149static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
146 int min_uA, int max_uA) 150 int min_uA, int max_uA)
147{ 151{
148 return gpio_regulator_set_value(dev, min_uA, max_uA); 152 return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
149} 153}
150 154
151static struct regulator_ops gpio_regulator_voltage_ops = { 155static struct regulator_ops gpio_regulator_voltage_ops = {
@@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
286 290
287 cfg.dev = &pdev->dev; 291 cfg.dev = &pdev->dev;
288 cfg.init_data = config->init_data; 292 cfg.init_data = config->init_data;
289 cfg.driver_data = &drvdata; 293 cfg.driver_data = drvdata;
290 294
291 drvdata->dev = regulator_register(&drvdata->desc, &cfg); 295 drvdata->dev = regulator_register(&drvdata->desc, &cfg);
292 if (IS_ERR(drvdata->dev)) { 296 if (IS_ERR(drvdata->dev)) {
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1f4bb80457b3..9d540cd02dab 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
259 config.dev = &client->dev; 259 config.dev = &client->dev;
260 config.init_data = pdata->regulator; 260 config.init_data = pdata->regulator;
261 config.driver_data = info; 261 config.driver_data = info;
262 config.regmap = info->regmap;
262 263
263 info->regulator = regulator_register(&dcdc_desc, &config); 264 info->regulator = regulator_register(&dcdc_desc, &config);
264 if (IS_ERR(info->regulator)) { 265 if (IS_ERR(info->regulator)) {
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c4435f608df7..9b7ca90057d5 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -775,9 +775,6 @@ static __devinit int palmas_probe(struct platform_device *pdev)
775err_unregister_regulator: 775err_unregister_regulator:
776 while (--id >= 0) 776 while (--id >= 0)
777 regulator_unregister(pmic->rdev[id]); 777 regulator_unregister(pmic->rdev[id]);
778 kfree(pmic->rdev);
779 kfree(pmic->desc);
780 kfree(pmic);
781 return ret; 778 return ret;
782} 779}
783 780
@@ -788,10 +785,6 @@ static int __devexit palmas_remove(struct platform_device *pdev)
788 785
789 for (id = 0; id < PALMAS_NUM_REGS; id++) 786 for (id = 0; id < PALMAS_NUM_REGS; id++)
790 regulator_unregister(pmic->rdev[id]); 787 regulator_unregister(pmic->rdev[id]);
791
792 kfree(pmic->rdev);
793 kfree(pmic->desc);
794 kfree(pmic);
795 return 0; 788 return 0;
796} 789}
797 790
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 290d6fc01029..9caadb482178 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -451,7 +451,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
451 451
452 desc = reg_voltage_map[reg_id]; 452 desc = reg_voltage_map[reg_id];
453 453
454 if (old_sel < new_sel) 454 if ((old_sel < new_sel) && s5m8767->ramp_delay)
455 return DIV_ROUND_UP(desc->step * (new_sel - old_sel), 455 return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
456 s5m8767->ramp_delay * 1000); 456 s5m8767->ramp_delay * 1000);
457 return 0; 457 return 0;
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 69425c4e86f3..de138e30d3e6 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
182 182
183 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 183 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
184 if (ret) { 184 if (ret) {
185 dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret); 185 dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
186 return ret; 186 return ret;
187 } 187 }
188 188
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 8ea7bccc7100..66324ee4678f 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
247 } 247 }
248 248
249 if (offset + filesz > len) { 249 if (offset + filesz > len) {
250 dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n", 250 dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
251 offset + filesz, len); 251 offset + filesz, len);
252 ret = -EINVAL; 252 ret = -EINVAL;
253 break; 253 break;
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
934 unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); 934 unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
935 if (unmapped != entry->len) { 935 if (unmapped != entry->len) {
936 /* nothing much to do besides complaining */ 936 /* nothing much to do besides complaining */
937 dev_err(dev, "failed to unmap %u/%u\n", entry->len, 937 dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
938 unmapped); 938 unmapped);
939 } 939 }
940 940
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
1020 1020
1021 ehdr = (struct elf32_hdr *)fw->data; 1021 ehdr = (struct elf32_hdr *)fw->data;
1022 1022
1023 dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size); 1023 dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
1024 1024
1025 /* 1025 /*
1026 * if enabling an IOMMU isn't relevant for this rproc, this is 1026 * if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
1041 1041
1042 /* look for the resource table */ 1042 /* look for the resource table */
1043 table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz); 1043 table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
1044 if (!table) 1044 if (!table) {
1045 ret = -EINVAL;
1045 goto clean_up; 1046 goto clean_up;
1047 }
1046 1048
1047 /* handle fw resources which are required to boot rproc */ 1049 /* handle fw resources which are required to boot rproc */
1048 ret = rproc_handle_boot_rsc(rproc, table, tablesz); 1050 ret = rproc_handle_boot_rsc(rproc, table, tablesz);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7d5f56edb8ef..4267789ca995 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev)
910 910
911static u32 rtc_handler(void *context) 911static u32 rtc_handler(void *context)
912{ 912{
913 struct device *dev = context;
914
915 pm_wakeup_event(dev, 0);
913 acpi_clear_event(ACPI_EVENT_RTC); 916 acpi_clear_event(ACPI_EVENT_RTC);
914 acpi_disable_event(ACPI_EVENT_RTC, 0); 917 acpi_disable_event(ACPI_EVENT_RTC, 0);
915 return ACPI_INTERRUPT_HANDLED; 918 return ACPI_INTERRUPT_HANDLED;
916} 919}
917 920
918static inline void rtc_wake_setup(void) 921static inline void rtc_wake_setup(struct device *dev)
919{ 922{
920 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); 923 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
921 /* 924 /*
922 * After the RTC handler is installed, the Fixed_RTC event should 925 * After the RTC handler is installed, the Fixed_RTC event should
923 * be disabled. Only when the RTC alarm is set will it be enabled. 926 * be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev)
950 if (acpi_disabled) 953 if (acpi_disabled)
951 return; 954 return;
952 955
953 rtc_wake_setup(); 956 rtc_wake_setup(dev);
954 acpi_rtc_info.wake_on = rtc_wake_on; 957 acpi_rtc_info.wake_on = rtc_wake_on;
955 acpi_rtc_info.wake_off = rtc_wake_off; 958 acpi_rtc_info.wake_off = rtc_wake_off;
956 959
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 33a6743ddc55..c05da00583f0 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -10,8 +10,6 @@
10#ifndef DASD_INT_H 10#ifndef DASD_INT_H
11#define DASD_INT_H 11#define DASD_INT_H
12 12
13#ifdef __KERNEL__
14
15/* we keep old device allocation scheme; IOW, minors are still in 0..255 */ 13/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
16#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS)) 14#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
17#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1) 15#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -791,6 +789,4 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
791#define dasd_eer_enabled(d) (0) 789#define dasd_eer_enabled(d) (0)
792#endif /* CONFIG_DASD_ERR */ 790#endif /* CONFIG_DASD_ERR */
793 791
794#endif /* __KERNEL__ */
795
796#endif /* DASD_H */ 792#endif /* DASD_H */
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 69e6c50d4cfb..50f7115990ff 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -211,7 +211,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
211 sccb.evbuf.event_qual = EQ_STORE_DATA; 211 sccb.evbuf.event_qual = EQ_STORE_DATA;
212 sccb.evbuf.data_id = DI_FCP_DUMP; 212 sccb.evbuf.data_id = DI_FCP_DUMP;
213 sccb.evbuf.event_id = 4712; 213 sccb.evbuf.event_id = 4712;
214#ifdef __s390x__ 214#ifdef CONFIG_64BIT
215 sccb.evbuf.asa_size = ASA_SIZE_64; 215 sccb.evbuf.asa_size = ASA_SIZE_64;
216#else 216#else
217 sccb.evbuf.asa_size = ASA_SIZE_32; 217 sccb.evbuf.asa_size = ASA_SIZE_32;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 01bb04cd9e75..2a096795b9aa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -571,13 +571,12 @@ free_cmd:
571static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd, 571static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
572 int iscsi_cmd, int size) 572 int iscsi_cmd, int size)
573{ 573{
574 cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size), 574 cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
575 &cmd->dma);
576 if (!cmd->va) { 575 if (!cmd->va) {
577 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n"); 576 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
578 return -ENOMEM; 577 return -ENOMEM;
579 } 578 }
580 memset(cmd->va, 0, sizeof(size)); 579 memset(cmd->va, 0, size);
581 cmd->size = size; 580 cmd->size = size;
582 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); 581 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
583 return 0; 582 return 0;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 8b6c6bf7837e..b83927440171 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
426 vshost = vport->drv_port.im_port->shost; 426 vshost = vport->drv_port.im_port->shost;
427 fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn); 427 fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
428 fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn); 428 fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
429 fc_host_supported_classes(vshost) = FC_COS_CLASS3;
430
431 memset(fc_host_supported_fc4s(vshost), 0,
432 sizeof(fc_host_supported_fc4s(vshost)));
433
434 /* For FCP type 0x08 */
435 if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
436 fc_host_supported_fc4s(vshost)[2] = 1;
437
438 /* For fibre channel services type 0x20 */
439 fc_host_supported_fc4s(vshost)[7] = 1;
440
441 fc_host_supported_speeds(vshost) =
442 bfad_im_supported_speeds(&bfad->bfa);
443 fc_host_maxframe_size(vshost) =
444 bfa_fcport_get_maxfrsize(&bfad->bfa);
445
429 fc_vport->dd_data = vport; 446 fc_vport->dd_data = vport;
430 vport->drv_port.im_port->fc_vport = fc_vport; 447 vport->drv_port.im_port->fc_vport = fc_vport;
431 } else if (rc == BFA_STATUS_INVALID_WWN) 448 } else if (rc == BFA_STATUS_INVALID_WWN)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 3153923f5b60..1ac09afe35ee 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -987,7 +987,7 @@ done:
987 return 0; 987 return 0;
988} 988}
989 989
990static u32 990u32
991bfad_im_supported_speeds(struct bfa_s *bfa) 991bfad_im_supported_speeds(struct bfa_s *bfa)
992{ 992{
993 struct bfa_ioc_attr_s *ioc_attr; 993 struct bfa_ioc_attr_s *ioc_attr;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 0814367ef101..f6c1023e502a 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -37,6 +37,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
37 struct bfad_im_port_s *im_port, struct device *dev); 37 struct bfad_im_port_s *im_port, struct device *dev);
38void bfad_im_scsi_host_free(struct bfad_s *bfad, 38void bfad_im_scsi_host_free(struct bfad_s *bfad,
39 struct bfad_im_port_s *im_port); 39 struct bfad_im_port_s *im_port);
40u32 bfad_im_supported_speeds(struct bfa_s *bfa);
40 41
41#define MAX_FCP_TARGET 1024 42#define MAX_FCP_TARGET 1024
42#define MAX_FCP_LUN 16384 43#define MAX_FCP_LUN 16384
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index a4953ef9e53a..0578fa0dc14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
62#include "bnx2fc_constants.h" 62#include "bnx2fc_constants.h"
63 63
64#define BNX2FC_NAME "bnx2fc" 64#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.10" 65#define BNX2FC_VERSION "1.0.11"
66 66
67#define PFX "bnx2fc: " 67#define PFX "bnx2fc: "
68 68
@@ -228,13 +228,16 @@ struct bnx2fc_interface {
228 struct packet_type fip_packet_type; 228 struct packet_type fip_packet_type;
229 struct workqueue_struct *timer_work_queue; 229 struct workqueue_struct *timer_work_queue;
230 struct kref kref; 230 struct kref kref;
231 struct fcoe_ctlr ctlr;
232 u8 vlan_enabled; 231 u8 vlan_enabled;
233 int vlan_id; 232 int vlan_id;
234 bool enabled; 233 bool enabled;
235}; 234};
236 235
237#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr) 236#define bnx2fc_from_ctlr(x) \
237 ((struct bnx2fc_interface *)((x) + 1))
238
239#define bnx2fc_to_ctlr(x) \
240 ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
238 241
239struct bnx2fc_lport { 242struct bnx2fc_lport {
240 struct list_head list; 243 struct list_head list;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ce0ce3e32f33..bdbbb13b8534 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
854 struct fc_exch *exch = fc_seq_exch(seq); 854 struct fc_exch *exch = fc_seq_exch(seq);
855 struct fc_lport *lport = exch->lp; 855 struct fc_lport *lport = exch->lp;
856 u8 *mac; 856 u8 *mac;
857 struct fc_frame_header *fh;
858 u8 op; 857 u8 op;
859 858
860 if (IS_ERR(fp)) 859 if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
862 861
863 mac = fr_cb(fp)->granted_mac; 862 mac = fr_cb(fp)->granted_mac;
864 if (is_zero_ether_addr(mac)) { 863 if (is_zero_ether_addr(mac)) {
865 fh = fc_frame_header_get(fp);
866 if (fh->fh_type != FC_TYPE_ELS) {
867 printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
868 "fh_type != FC_TYPE_ELS\n");
869 fc_frame_free(fp);
870 return;
871 }
872 op = fc_frame_payload_op(fp); 864 op = fc_frame_payload_op(fp);
873 if (lport->vport) { 865 if (lport->vport) {
874 if (op == ELS_LS_RJT) { 866 if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
878 return; 870 return;
879 } 871 }
880 } 872 }
881 if (fcoe_ctlr_recv_flogi(fip, lport, fp)) { 873 fcoe_ctlr_recv_flogi(fip, lport, fp);
882 fc_frame_free(fp);
883 return;
884 }
885 } 874 }
886 fip->update_mac(lport, mac); 875 if (!is_zero_ether_addr(mac))
876 fip->update_mac(lport, mac);
887done: 877done:
888 fc_lport_flogi_resp(seq, fp, lport); 878 fc_lport_flogi_resp(seq, fp, lport);
889} 879}
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
910{ 900{
911 struct fcoe_port *port = lport_priv(lport); 901 struct fcoe_port *port = lport_priv(lport);
912 struct bnx2fc_interface *interface = port->priv; 902 struct bnx2fc_interface *interface = port->priv;
913 struct fcoe_ctlr *fip = &interface->ctlr; 903 struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
914 struct fc_frame_header *fh = fc_frame_header_get(fp); 904 struct fc_frame_header *fh = fc_frame_header_get(fp);
915 905
916 switch (op) { 906 switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c1c6a92a0b98..f52f668fd247 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
22 22
23#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
24#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
25#define DRV_MODULE_RELDATE "Jan 22, 2011" 25#define DRV_MODULE_RELDATE "Apr 24, 2012"
26 26
27 27
28static char version[] __devinitdata = 28static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
54static struct libfc_function_template bnx2fc_libfc_fcn_templ; 54static struct libfc_function_template bnx2fc_libfc_fcn_templ;
55static struct scsi_host_template bnx2fc_shost_template; 55static struct scsi_host_template bnx2fc_shost_template;
56static struct fc_function_template bnx2fc_transport_function; 56static struct fc_function_template bnx2fc_transport_function;
57static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
57static struct fc_function_template bnx2fc_vport_xport_function; 58static struct fc_function_template bnx2fc_vport_xport_function;
58static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); 59static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
59static void __bnx2fc_destroy(struct bnx2fc_interface *interface); 60static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
88static void bnx2fc_stop(struct bnx2fc_interface *interface); 89static void bnx2fc_stop(struct bnx2fc_interface *interface);
89static int __init bnx2fc_mod_init(void); 90static int __init bnx2fc_mod_init(void);
90static void __exit bnx2fc_mod_exit(void); 91static void __exit bnx2fc_mod_exit(void);
92static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
91 93
92unsigned int bnx2fc_debug_level; 94unsigned int bnx2fc_debug_level;
93module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); 95module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
118 __fcoe_get_lesb(lport, fc_lesb, netdev); 120 __fcoe_get_lesb(lport, fc_lesb, netdev);
119} 121}
120 122
123static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
124{
125 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
126 struct net_device *netdev = bnx2fc_netdev(fip->lp);
127 struct fcoe_fc_els_lesb *fcoe_lesb;
128 struct fc_els_lesb fc_lesb;
129
130 __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
131 fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
132
133 ctlr_dev->lesb.lesb_link_fail =
134 ntohl(fcoe_lesb->lesb_link_fail);
135 ctlr_dev->lesb.lesb_vlink_fail =
136 ntohl(fcoe_lesb->lesb_vlink_fail);
137 ctlr_dev->lesb.lesb_miss_fka =
138 ntohl(fcoe_lesb->lesb_miss_fka);
139 ctlr_dev->lesb.lesb_symb_err =
140 ntohl(fcoe_lesb->lesb_symb_err);
141 ctlr_dev->lesb.lesb_err_block =
142 ntohl(fcoe_lesb->lesb_err_block);
143 ctlr_dev->lesb.lesb_fcs_error =
144 ntohl(fcoe_lesb->lesb_fcs_error);
145}
146EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
147
148static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
149{
150 struct fcoe_ctlr_device *ctlr_dev =
151 fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
152 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
153 struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
154
155 fcf_dev->vlan_id = fcoe->vlan_id;
156}
157
121static void bnx2fc_clean_rx_queue(struct fc_lport *lp) 158static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
122{ 159{
123 struct fcoe_percpu_s *bg; 160 struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
244 struct sk_buff *skb; 281 struct sk_buff *skb;
245 struct fc_frame_header *fh; 282 struct fc_frame_header *fh;
246 struct bnx2fc_interface *interface; 283 struct bnx2fc_interface *interface;
284 struct fcoe_ctlr *ctlr;
247 struct bnx2fc_hba *hba; 285 struct bnx2fc_hba *hba;
248 struct fcoe_port *port; 286 struct fcoe_port *port;
249 struct fcoe_hdr *hp; 287 struct fcoe_hdr *hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
256 294
257 port = (struct fcoe_port *)lport_priv(lport); 295 port = (struct fcoe_port *)lport_priv(lport);
258 interface = port->priv; 296 interface = port->priv;
297 ctlr = bnx2fc_to_ctlr(interface);
259 hba = interface->hba; 298 hba = interface->hba;
260 299
261 fh = fc_frame_header_get(fp); 300 fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
268 } 307 }
269 308
270 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { 309 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
271 if (!interface->ctlr.sel_fcf) { 310 if (!ctlr->sel_fcf) {
272 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); 311 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
273 kfree_skb(skb); 312 kfree_skb(skb);
274 return -EINVAL; 313 return -EINVAL;
275 } 314 }
276 if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb)) 315 if (fcoe_ctlr_els_send(ctlr, lport, skb))
277 return 0; 316 return 0;
278 } 317 }
279 318
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
346 /* fill up mac and fcoe headers */ 385 /* fill up mac and fcoe headers */
347 eh = eth_hdr(skb); 386 eh = eth_hdr(skb);
348 eh->h_proto = htons(ETH_P_FCOE); 387 eh->h_proto = htons(ETH_P_FCOE);
349 if (interface->ctlr.map_dest) 388 if (ctlr->map_dest)
350 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 389 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
351 else 390 else
352 /* insert GW address */ 391 /* insert GW address */
353 memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN); 392 memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
354 393
355 if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 394 if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
356 memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN); 395 memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
357 else 396 else
358 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 397 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
359 398
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
403{ 442{
404 struct fc_lport *lport; 443 struct fc_lport *lport;
405 struct bnx2fc_interface *interface; 444 struct bnx2fc_interface *interface;
445 struct fcoe_ctlr *ctlr;
406 struct fc_frame_header *fh; 446 struct fc_frame_header *fh;
407 struct fcoe_rcv_info *fr; 447 struct fcoe_rcv_info *fr;
408 struct fcoe_percpu_s *bg; 448 struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
410 450
411 interface = container_of(ptype, struct bnx2fc_interface, 451 interface = container_of(ptype, struct bnx2fc_interface,
412 fcoe_packet_type); 452 fcoe_packet_type);
413 lport = interface->ctlr.lp; 453 ctlr = bnx2fc_to_ctlr(interface);
454 lport = ctlr->lp;
414 455
415 if (unlikely(lport == NULL)) { 456 if (unlikely(lport == NULL)) {
416 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n"); 457 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
758{ 799{
759 struct bnx2fc_hba *hba; 800 struct bnx2fc_hba *hba;
760 struct bnx2fc_interface *interface; 801 struct bnx2fc_interface *interface;
802 struct fcoe_ctlr *ctlr;
761 struct fcoe_port *port; 803 struct fcoe_port *port;
762 u64 wwnn, wwpn; 804 u64 wwnn, wwpn;
763 805
764 port = lport_priv(lport); 806 port = lport_priv(lport);
765 interface = port->priv; 807 interface = port->priv;
808 ctlr = bnx2fc_to_ctlr(interface);
766 hba = interface->hba; 809 hba = interface->hba;
767 810
768 /* require support for get_pauseparam ethtool op. */ 811 /* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
781 824
782 if (!lport->vport) { 825 if (!lport->vport) {
783 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 826 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
784 wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 827 wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
785 1, 0); 828 1, 0);
786 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); 829 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
787 fc_set_wwnn(lport, wwnn); 830 fc_set_wwnn(lport, wwnn);
788 831
789 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 832 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
790 wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 833 wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
791 2, 0); 834 2, 0);
792 835
793 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); 836 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
824 struct fc_lport *lport; 867 struct fc_lport *lport;
825 struct fc_lport *vport; 868 struct fc_lport *vport;
826 struct bnx2fc_interface *interface, *tmp; 869 struct bnx2fc_interface *interface, *tmp;
870 struct fcoe_ctlr *ctlr;
827 int wait_for_upload = 0; 871 int wait_for_upload = 0;
828 u32 link_possible = 1; 872 u32 link_possible = 1;
829 873
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
874 if (interface->hba != hba) 918 if (interface->hba != hba)
875 continue; 919 continue;
876 920
877 lport = interface->ctlr.lp; 921 ctlr = bnx2fc_to_ctlr(interface);
922 lport = ctlr->lp;
878 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", 923 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
879 interface->netdev->name, event); 924 interface->netdev->name, event);
880 925
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
889 * on a stale vlan 934 * on a stale vlan
890 */ 935 */
891 if (interface->enabled) 936 if (interface->enabled)
892 fcoe_ctlr_link_up(&interface->ctlr); 937 fcoe_ctlr_link_up(ctlr);
893 } else if (fcoe_ctlr_link_down(&interface->ctlr)) { 938 } else if (fcoe_ctlr_link_down(ctlr)) {
894 mutex_lock(&lport->lp_mutex); 939 mutex_lock(&lport->lp_mutex);
895 list_for_each_entry(vport, &lport->vports, list) 940 list_for_each_entry(vport, &lport->vports, list)
896 fc_host_port_type(vport->host) = 941 fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
995 struct net_device *orig_dev) 1040 struct net_device *orig_dev)
996{ 1041{
997 struct bnx2fc_interface *interface; 1042 struct bnx2fc_interface *interface;
1043 struct fcoe_ctlr *ctlr;
998 interface = container_of(ptype, struct bnx2fc_interface, 1044 interface = container_of(ptype, struct bnx2fc_interface,
999 fip_packet_type); 1045 fip_packet_type);
1000 fcoe_ctlr_recv(&interface->ctlr, skb); 1046 ctlr = bnx2fc_to_ctlr(interface);
1047 fcoe_ctlr_recv(ctlr, skb);
1001 return 0; 1048 return 0;
1002} 1049}
1003 1050
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
1155{ 1202{
1156 struct net_device *netdev = interface->netdev; 1203 struct net_device *netdev = interface->netdev;
1157 struct net_device *physdev = interface->hba->phys_dev; 1204 struct net_device *physdev = interface->hba->phys_dev;
1205 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1158 struct netdev_hw_addr *ha; 1206 struct netdev_hw_addr *ha;
1159 int sel_san_mac = 0; 1207 int sel_san_mac = 0;
1160 1208
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
1169 1217
1170 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 1218 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
1171 (is_valid_ether_addr(ha->addr))) { 1219 (is_valid_ether_addr(ha->addr))) {
1172 memcpy(interface->ctlr.ctl_src_addr, ha->addr, 1220 memcpy(ctlr->ctl_src_addr, ha->addr,
1173 ETH_ALEN); 1221 ETH_ALEN);
1174 sel_san_mac = 1; 1222 sel_san_mac = 1;
1175 BNX2FC_MISC_DBG("Found SAN MAC\n"); 1223 BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
1224 1272
1225static void bnx2fc_interface_release(struct kref *kref) 1273static void bnx2fc_interface_release(struct kref *kref)
1226{ 1274{
1275 struct fcoe_ctlr_device *ctlr_dev;
1227 struct bnx2fc_interface *interface; 1276 struct bnx2fc_interface *interface;
1277 struct fcoe_ctlr *ctlr;
1228 struct net_device *netdev; 1278 struct net_device *netdev;
1229 1279
1230 interface = container_of(kref, struct bnx2fc_interface, kref); 1280 interface = container_of(kref, struct bnx2fc_interface, kref);
1231 BNX2FC_MISC_DBG("Interface is being released\n"); 1281 BNX2FC_MISC_DBG("Interface is being released\n");
1232 1282
1283 ctlr = bnx2fc_to_ctlr(interface);
1284 ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
1233 netdev = interface->netdev; 1285 netdev = interface->netdev;
1234 1286
1235 /* tear-down FIP controller */ 1287 /* tear-down FIP controller */
1236 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags)) 1288 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
1237 fcoe_ctlr_destroy(&interface->ctlr); 1289 fcoe_ctlr_destroy(ctlr);
1238 1290
1239 kfree(interface); 1291 fcoe_ctlr_device_delete(ctlr_dev);
1240 1292
1241 dev_put(netdev); 1293 dev_put(netdev);
1242 module_put(THIS_MODULE); 1294 module_put(THIS_MODULE);
@@ -1329,33 +1381,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1329 struct net_device *netdev, 1381 struct net_device *netdev,
1330 enum fip_state fip_mode) 1382 enum fip_state fip_mode)
1331{ 1383{
1384 struct fcoe_ctlr_device *ctlr_dev;
1332 struct bnx2fc_interface *interface; 1385 struct bnx2fc_interface *interface;
1386 struct fcoe_ctlr *ctlr;
1387 int size;
1333 int rc = 0; 1388 int rc = 0;
1334 1389
1335 interface = kzalloc(sizeof(*interface), GFP_KERNEL); 1390 size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
1336 if (!interface) { 1391 ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
1392 size);
1393 if (!ctlr_dev) {
1337 printk(KERN_ERR PFX "Unable to allocate interface structure\n"); 1394 printk(KERN_ERR PFX "Unable to allocate interface structure\n");
1338 return NULL; 1395 return NULL;
1339 } 1396 }
1397 ctlr = fcoe_ctlr_device_priv(ctlr_dev);
1398 interface = fcoe_ctlr_priv(ctlr);
1340 dev_hold(netdev); 1399 dev_hold(netdev);
1341 kref_init(&interface->kref); 1400 kref_init(&interface->kref);
1342 interface->hba = hba; 1401 interface->hba = hba;
1343 interface->netdev = netdev; 1402 interface->netdev = netdev;
1344 1403
1345 /* Initialize FIP */ 1404 /* Initialize FIP */
1346 fcoe_ctlr_init(&interface->ctlr, fip_mode); 1405 fcoe_ctlr_init(ctlr, fip_mode);
1347 interface->ctlr.send = bnx2fc_fip_send; 1406 ctlr->send = bnx2fc_fip_send;
1348 interface->ctlr.update_mac = bnx2fc_update_src_mac; 1407 ctlr->update_mac = bnx2fc_update_src_mac;
1349 interface->ctlr.get_src_addr = bnx2fc_get_src_mac; 1408 ctlr->get_src_addr = bnx2fc_get_src_mac;
1350 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); 1409 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
1351 1410
1352 rc = bnx2fc_interface_setup(interface); 1411 rc = bnx2fc_interface_setup(interface);
1353 if (!rc) 1412 if (!rc)
1354 return interface; 1413 return interface;
1355 1414
1356 fcoe_ctlr_destroy(&interface->ctlr); 1415 fcoe_ctlr_destroy(ctlr);
1357 dev_put(netdev); 1416 dev_put(netdev);
1358 kfree(interface); 1417 fcoe_ctlr_device_delete(ctlr_dev);
1359 return NULL; 1418 return NULL;
1360} 1419}
1361 1420
@@ -1373,6 +1432,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1373static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, 1432static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1374 struct device *parent, int npiv) 1433 struct device *parent, int npiv)
1375{ 1434{
1435 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1376 struct fc_lport *lport, *n_port; 1436 struct fc_lport *lport, *n_port;
1377 struct fcoe_port *port; 1437 struct fcoe_port *port;
1378 struct Scsi_Host *shost; 1438 struct Scsi_Host *shost;
@@ -1383,7 +1443,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1383 1443
1384 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); 1444 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
1385 if (!blport) { 1445 if (!blport) {
1386 BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n"); 1446 BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
1387 return NULL; 1447 return NULL;
1388 } 1448 }
1389 1449
@@ -1479,7 +1539,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
1479 1539
1480static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) 1540static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
1481{ 1541{
1482 struct fc_lport *lport = interface->ctlr.lp; 1542 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1543 struct fc_lport *lport = ctlr->lp;
1483 struct fcoe_port *port = lport_priv(lport); 1544 struct fcoe_port *port = lport_priv(lport);
1484 struct bnx2fc_hba *hba = interface->hba; 1545 struct bnx2fc_hba *hba = interface->hba;
1485 1546
@@ -1519,7 +1580,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1519 1580
1520static void __bnx2fc_destroy(struct bnx2fc_interface *interface) 1581static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
1521{ 1582{
1522 struct fc_lport *lport = interface->ctlr.lp; 1583 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1584 struct fc_lport *lport = ctlr->lp;
1523 struct fcoe_port *port = lport_priv(lport); 1585 struct fcoe_port *port = lport_priv(lport);
1524 1586
1525 bnx2fc_interface_cleanup(interface); 1587 bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1605,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
1543{ 1605{
1544 struct bnx2fc_interface *interface = NULL; 1606 struct bnx2fc_interface *interface = NULL;
1545 struct workqueue_struct *timer_work_queue; 1607 struct workqueue_struct *timer_work_queue;
1608 struct fcoe_ctlr *ctlr;
1546 int rc = 0; 1609 int rc = 0;
1547 1610
1548 rtnl_lock(); 1611 rtnl_lock();
1549 mutex_lock(&bnx2fc_dev_lock); 1612 mutex_lock(&bnx2fc_dev_lock);
1550 1613
1551 interface = bnx2fc_interface_lookup(netdev); 1614 interface = bnx2fc_interface_lookup(netdev);
1552 if (!interface || !interface->ctlr.lp) { 1615 ctlr = bnx2fc_to_ctlr(interface);
1616 if (!interface || !ctlr->lp) {
1553 rc = -ENODEV; 1617 rc = -ENODEV;
1554 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n"); 1618 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
1555 goto netdev_err; 1619 goto netdev_err;
@@ -1646,6 +1710,7 @@ static void bnx2fc_ulp_start(void *handle)
1646{ 1710{
1647 struct bnx2fc_hba *hba = handle; 1711 struct bnx2fc_hba *hba = handle;
1648 struct bnx2fc_interface *interface; 1712 struct bnx2fc_interface *interface;
1713 struct fcoe_ctlr *ctlr;
1649 struct fc_lport *lport; 1714 struct fc_lport *lport;
1650 1715
1651 mutex_lock(&bnx2fc_dev_lock); 1716 mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1722,8 @@ static void bnx2fc_ulp_start(void *handle)
1657 1722
1658 list_for_each_entry(interface, &if_list, list) { 1723 list_for_each_entry(interface, &if_list, list) {
1659 if (interface->hba == hba) { 1724 if (interface->hba == hba) {
1660 lport = interface->ctlr.lp; 1725 ctlr = bnx2fc_to_ctlr(interface);
1726 lport = ctlr->lp;
1661 /* Kick off Fabric discovery*/ 1727 /* Kick off Fabric discovery*/
1662 printk(KERN_ERR PFX "ulp_init: start discovery\n"); 1728 printk(KERN_ERR PFX "ulp_init: start discovery\n");
1663 lport->tt.frame_send = bnx2fc_xmit; 1729 lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1743,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
1677 1743
1678static void bnx2fc_stop(struct bnx2fc_interface *interface) 1744static void bnx2fc_stop(struct bnx2fc_interface *interface)
1679{ 1745{
1746 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1680 struct fc_lport *lport; 1747 struct fc_lport *lport;
1681 struct fc_lport *vport; 1748 struct fc_lport *vport;
1682 1749
1683 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) 1750 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
1684 return; 1751 return;
1685 1752
1686 lport = interface->ctlr.lp; 1753 lport = ctlr->lp;
1687 bnx2fc_port_shutdown(lport); 1754 bnx2fc_port_shutdown(lport);
1688 1755
1689 mutex_lock(&lport->lp_mutex); 1756 mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1759,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
1692 FC_PORTTYPE_UNKNOWN; 1759 FC_PORTTYPE_UNKNOWN;
1693 mutex_unlock(&lport->lp_mutex); 1760 mutex_unlock(&lport->lp_mutex);
1694 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; 1761 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1695 fcoe_ctlr_link_down(&interface->ctlr); 1762 fcoe_ctlr_link_down(ctlr);
1696 fcoe_clean_pending_queue(lport); 1763 fcoe_clean_pending_queue(lport);
1697} 1764}
1698 1765
@@ -1804,6 +1871,7 @@ exit:
1804 1871
1805static void bnx2fc_start_disc(struct bnx2fc_interface *interface) 1872static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1806{ 1873{
1874 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1807 struct fc_lport *lport; 1875 struct fc_lport *lport;
1808 int wait_cnt = 0; 1876 int wait_cnt = 0;
1809 1877
@@ -1814,18 +1882,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1814 return; 1882 return;
1815 } 1883 }
1816 1884
1817 lport = interface->ctlr.lp; 1885 lport = ctlr->lp;
1818 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); 1886 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
1819 1887
1820 if (!bnx2fc_link_ok(lport) && interface->enabled) { 1888 if (!bnx2fc_link_ok(lport) && interface->enabled) {
1821 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); 1889 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
1822 fcoe_ctlr_link_up(&interface->ctlr); 1890 fcoe_ctlr_link_up(ctlr);
1823 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1891 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1824 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); 1892 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
1825 } 1893 }
1826 1894
1827 /* wait for the FCF to be selected before issuing FLOGI */ 1895 /* wait for the FCF to be selected before issuing FLOGI */
1828 while (!interface->ctlr.sel_fcf) { 1896 while (!ctlr->sel_fcf) {
1829 msleep(250); 1897 msleep(250);
1830 /* give up after 3 secs */ 1898 /* give up after 3 secs */
1831 if (++wait_cnt > 12) 1899 if (++wait_cnt > 12)
@@ -1889,19 +1957,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1889static int bnx2fc_disable(struct net_device *netdev) 1957static int bnx2fc_disable(struct net_device *netdev)
1890{ 1958{
1891 struct bnx2fc_interface *interface; 1959 struct bnx2fc_interface *interface;
1960 struct fcoe_ctlr *ctlr;
1892 int rc = 0; 1961 int rc = 0;
1893 1962
1894 rtnl_lock(); 1963 rtnl_lock();
1895 mutex_lock(&bnx2fc_dev_lock); 1964 mutex_lock(&bnx2fc_dev_lock);
1896 1965
1897 interface = bnx2fc_interface_lookup(netdev); 1966 interface = bnx2fc_interface_lookup(netdev);
1898 if (!interface || !interface->ctlr.lp) { 1967 ctlr = bnx2fc_to_ctlr(interface);
1968 if (!interface || !ctlr->lp) {
1899 rc = -ENODEV; 1969 rc = -ENODEV;
1900 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); 1970 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
1901 } else { 1971 } else {
1902 interface->enabled = false; 1972 interface->enabled = false;
1903 fcoe_ctlr_link_down(&interface->ctlr); 1973 fcoe_ctlr_link_down(ctlr);
1904 fcoe_clean_pending_queue(interface->ctlr.lp); 1974 fcoe_clean_pending_queue(ctlr->lp);
1905 } 1975 }
1906 1976
1907 mutex_unlock(&bnx2fc_dev_lock); 1977 mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +1983,19 @@ static int bnx2fc_disable(struct net_device *netdev)
1913static int bnx2fc_enable(struct net_device *netdev) 1983static int bnx2fc_enable(struct net_device *netdev)
1914{ 1984{
1915 struct bnx2fc_interface *interface; 1985 struct bnx2fc_interface *interface;
1986 struct fcoe_ctlr *ctlr;
1916 int rc = 0; 1987 int rc = 0;
1917 1988
1918 rtnl_lock(); 1989 rtnl_lock();
1919 mutex_lock(&bnx2fc_dev_lock); 1990 mutex_lock(&bnx2fc_dev_lock);
1920 1991
1921 interface = bnx2fc_interface_lookup(netdev); 1992 interface = bnx2fc_interface_lookup(netdev);
1922 if (!interface || !interface->ctlr.lp) { 1993 ctlr = bnx2fc_to_ctlr(interface);
1994 if (!interface || !ctlr->lp) {
1923 rc = -ENODEV; 1995 rc = -ENODEV;
1924 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); 1996 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
1925 } else if (!bnx2fc_link_ok(interface->ctlr.lp)) { 1997 } else if (!bnx2fc_link_ok(ctlr->lp)) {
1926 fcoe_ctlr_link_up(&interface->ctlr); 1998 fcoe_ctlr_link_up(ctlr);
1927 interface->enabled = true; 1999 interface->enabled = true;
1928 } 2000 }
1929 2001
@@ -1944,6 +2016,7 @@ static int bnx2fc_enable(struct net_device *netdev)
1944 */ 2016 */
1945static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) 2017static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1946{ 2018{
2019 struct fcoe_ctlr *ctlr;
1947 struct bnx2fc_interface *interface; 2020 struct bnx2fc_interface *interface;
1948 struct bnx2fc_hba *hba; 2021 struct bnx2fc_hba *hba;
1949 struct net_device *phys_dev; 2022 struct net_device *phys_dev;
@@ -2010,6 +2083,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
2010 goto ifput_err; 2083 goto ifput_err;
2011 } 2084 }
2012 2085
2086 ctlr = bnx2fc_to_ctlr(interface);
2013 interface->vlan_id = vlan_id; 2087 interface->vlan_id = vlan_id;
2014 interface->vlan_enabled = 1; 2088 interface->vlan_enabled = 1;
2015 2089
@@ -2035,10 +2109,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
2035 lport->boot_time = jiffies; 2109 lport->boot_time = jiffies;
2036 2110
2037 /* Make this master N_port */ 2111 /* Make this master N_port */
2038 interface->ctlr.lp = lport; 2112 ctlr->lp = lport;
2039 2113
2040 if (!bnx2fc_link_ok(lport)) { 2114 if (!bnx2fc_link_ok(lport)) {
2041 fcoe_ctlr_link_up(&interface->ctlr); 2115 fcoe_ctlr_link_up(ctlr);
2042 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 2116 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
2043 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); 2117 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
2044 } 2118 }
@@ -2439,6 +2513,19 @@ static void __exit bnx2fc_mod_exit(void)
2439module_init(bnx2fc_mod_init); 2513module_init(bnx2fc_mod_init);
2440module_exit(bnx2fc_mod_exit); 2514module_exit(bnx2fc_mod_exit);
2441 2515
2516static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
2517 .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
2518 .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
2519 .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
2520 .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
2521 .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
2522 .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
2523 .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
2524
2525 .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
2526 .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
2527};
2528
2442static struct fc_function_template bnx2fc_transport_function = { 2529static struct fc_function_template bnx2fc_transport_function = {
2443 .show_host_node_name = 1, 2530 .show_host_node_name = 1,
2444 .show_host_port_name = 1, 2531 .show_host_port_name = 1,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index afd570962b8c..2ca6bfe4ce5e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
167{ 167{
168 struct fc_lport *lport = port->lport; 168 struct fc_lport *lport = port->lport;
169 struct bnx2fc_interface *interface = port->priv; 169 struct bnx2fc_interface *interface = port->priv;
170 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
170 struct bnx2fc_hba *hba = interface->hba; 171 struct bnx2fc_hba *hba = interface->hba;
171 struct kwqe *kwqe_arr[4]; 172 struct kwqe *kwqe_arr[4];
172 struct fcoe_kwqe_conn_offload1 ofld_req1; 173 struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
314 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; 315 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
315 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; 316 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
316 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; 317 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
317 ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 318 ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
318 /* fcf mac */ 319 /* fcf mac */
319 ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 320 ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
320 ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 321 ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
321 ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 322 ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
322 ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 323 ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
323 ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 324 ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
324 325
325 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; 326 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
326 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); 327 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
351{ 352{
352 struct kwqe *kwqe_arr[2]; 353 struct kwqe *kwqe_arr[2];
353 struct bnx2fc_interface *interface = port->priv; 354 struct bnx2fc_interface *interface = port->priv;
355 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
354 struct bnx2fc_hba *hba = interface->hba; 356 struct bnx2fc_hba *hba = interface->hba;
355 struct fcoe_kwqe_conn_enable_disable enbl_req; 357 struct fcoe_kwqe_conn_enable_disable enbl_req;
356 struct fc_lport *lport = port->lport; 358 struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
374 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; 376 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
375 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); 377 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
376 378
377 enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 379 enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
378 enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 380 enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
379 enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 381 enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
380 enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 382 enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
381 enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 383 enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
382 enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 384 enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
383 385
384 port_id = fc_host_port_id(lport->host); 386 port_id = fc_host_port_id(lport->host);
385 if (port_id != tgt->sid) { 387 if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
419 struct bnx2fc_rport *tgt) 421 struct bnx2fc_rport *tgt)
420{ 422{
421 struct bnx2fc_interface *interface = port->priv; 423 struct bnx2fc_interface *interface = port->priv;
424 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
422 struct bnx2fc_hba *hba = interface->hba; 425 struct bnx2fc_hba *hba = interface->hba;
423 struct fcoe_kwqe_conn_enable_disable disable_req; 426 struct fcoe_kwqe_conn_enable_disable disable_req;
424 struct kwqe *kwqe_arr[2]; 427 struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
440 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; 443 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
441 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; 444 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
442 445
443 disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 446 disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
444 disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 447 disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
445 disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 448 disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
446 disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 449 disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
447 disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 450 disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
448 disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 451 disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
449 452
450 port_id = tgt->sid; 453 port_id = tgt->sid;
451 disable_req.s_id[0] = (port_id & 0x000000FF); 454 disable_req.s_id[0] = (port_id & 0x000000FF);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e897ce975bb8..4f7453b9e41e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -810,8 +810,22 @@ retry_tmf:
810 spin_lock_bh(&tgt->tgt_lock); 810 spin_lock_bh(&tgt->tgt_lock);
811 811
812 io_req->wait_for_comp = 0; 812 io_req->wait_for_comp = 0;
813 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) 813 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
814 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); 814 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
815 if (io_req->on_tmf_queue) {
816 list_del_init(&io_req->link);
817 io_req->on_tmf_queue = 0;
818 }
819 io_req->wait_for_comp = 1;
820 bnx2fc_initiate_cleanup(io_req);
821 spin_unlock_bh(&tgt->tgt_lock);
822 rc = wait_for_completion_timeout(&io_req->tm_done,
823 BNX2FC_FW_TIMEOUT);
824 spin_lock_bh(&tgt->tgt_lock);
825 io_req->wait_for_comp = 0;
826 if (!rc)
827 kref_put(&io_req->refcount, bnx2fc_cmd_release);
828 }
815 829
816 spin_unlock_bh(&tgt->tgt_lock); 830 spin_unlock_bh(&tgt->tgt_lock);
817 831
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1089 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); 1103 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
1090} 1104}
1091 1105
1106int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
1107{
1108 struct bnx2fc_rport *tgt = io_req->tgt;
1109 struct fc_rport_priv *rdata = tgt->rdata;
1110 int logo_issued;
1111 int rc = SUCCESS;
1112 int wait_cnt = 0;
1113
1114 BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
1115 tgt->flags);
1116 logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
1117 &tgt->flags);
1118 io_req->wait_for_comp = 1;
1119 bnx2fc_initiate_cleanup(io_req);
1120
1121 spin_unlock_bh(&tgt->tgt_lock);
1122
1123 wait_for_completion(&io_req->tm_done);
1124
1125 io_req->wait_for_comp = 0;
1126 /*
1127 * release the reference taken in eh_abort to allow the
1128 * target to re-login after flushing IOs
1129 */
1130 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1131
1132 if (!logo_issued) {
1133 clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
1134 mutex_lock(&lport->disc.disc_mutex);
1135 lport->tt.rport_logoff(rdata);
1136 mutex_unlock(&lport->disc.disc_mutex);
1137 do {
1138 msleep(BNX2FC_RELOGIN_WAIT_TIME);
1139 if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
1140 rc = FAILED;
1141 break;
1142 }
1143 } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
1144 }
1145 spin_lock_bh(&tgt->tgt_lock);
1146 return rc;
1147}
1092/** 1148/**
1093 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding 1149 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
1094 * SCSI command 1150 * SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1103 struct fc_rport_libfc_priv *rp = rport->dd_data; 1159 struct fc_rport_libfc_priv *rp = rport->dd_data;
1104 struct bnx2fc_cmd *io_req; 1160 struct bnx2fc_cmd *io_req;
1105 struct fc_lport *lport; 1161 struct fc_lport *lport;
1106 struct fc_rport_priv *rdata;
1107 struct bnx2fc_rport *tgt; 1162 struct bnx2fc_rport *tgt;
1108 int logo_issued;
1109 int wait_cnt = 0;
1110 int rc = FAILED; 1163 int rc = FAILED;
1111 1164
1112 1165
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1183 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1236 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1184 1237
1185 init_completion(&io_req->tm_done); 1238 init_completion(&io_req->tm_done);
1186 io_req->wait_for_comp = 1;
1187 1239
1188 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { 1240 if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
1189 /* Cancel the current timer running on this io_req */
1190 if (cancel_delayed_work(&io_req->timeout_work))
1191 kref_put(&io_req->refcount,
1192 bnx2fc_cmd_release); /* drop timer hold */
1193 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1194 rc = bnx2fc_initiate_abts(io_req);
1195 } else {
1196 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " 1241 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1197 "already in abts processing\n", io_req->xid); 1242 "already in abts processing\n", io_req->xid);
1198 if (cancel_delayed_work(&io_req->timeout_work)) 1243 if (cancel_delayed_work(&io_req->timeout_work))
1199 kref_put(&io_req->refcount, 1244 kref_put(&io_req->refcount,
1200 bnx2fc_cmd_release); /* drop timer hold */ 1245 bnx2fc_cmd_release); /* drop timer hold */
1201 bnx2fc_initiate_cleanup(io_req); 1246 rc = bnx2fc_expl_logo(lport, io_req);
1247 goto out;
1248 }
1202 1249
1250 /* Cancel the current timer running on this io_req */
1251 if (cancel_delayed_work(&io_req->timeout_work))
1252 kref_put(&io_req->refcount,
1253 bnx2fc_cmd_release); /* drop timer hold */
1254 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1255 io_req->wait_for_comp = 1;
1256 rc = bnx2fc_initiate_abts(io_req);
1257 if (rc == FAILED) {
1258 bnx2fc_initiate_cleanup(io_req);
1203 spin_unlock_bh(&tgt->tgt_lock); 1259 spin_unlock_bh(&tgt->tgt_lock);
1204
1205 wait_for_completion(&io_req->tm_done); 1260 wait_for_completion(&io_req->tm_done);
1206
1207 spin_lock_bh(&tgt->tgt_lock); 1261 spin_lock_bh(&tgt->tgt_lock);
1208 io_req->wait_for_comp = 0; 1262 io_req->wait_for_comp = 0;
1209 rdata = io_req->tgt->rdata; 1263 goto done;
1210 logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
1211 &tgt->flags);
1212 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1213 spin_unlock_bh(&tgt->tgt_lock);
1214
1215 if (!logo_issued) {
1216 BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
1217 tgt->flags);
1218 mutex_lock(&lport->disc.disc_mutex);
1219 lport->tt.rport_logoff(rdata);
1220 mutex_unlock(&lport->disc.disc_mutex);
1221 do {
1222 msleep(BNX2FC_RELOGIN_WAIT_TIME);
1223 /*
1224 * If session not recovered, let SCSI-ml
1225 * escalate error recovery.
1226 */
1227 if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
1228 return FAILED;
1229 } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
1230 &tgt->flags));
1231 }
1232 return SUCCESS;
1233 }
1234 if (rc == FAILED) {
1235 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1236 spin_unlock_bh(&tgt->tgt_lock);
1237 return rc;
1238 } 1264 }
1239 spin_unlock_bh(&tgt->tgt_lock); 1265 spin_unlock_bh(&tgt->tgt_lock);
1240 1266
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1247 /* Let the scsi-ml try to recover this command */ 1273 /* Let the scsi-ml try to recover this command */
1248 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", 1274 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1249 io_req->xid); 1275 io_req->xid);
1250 rc = FAILED; 1276 rc = bnx2fc_expl_logo(lport, io_req);
1277 goto out;
1251 } else { 1278 } else {
1252 /* 1279 /*
1253 * We come here even when there was a race condition 1280 * We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1259 bnx2fc_scsi_done(io_req, DID_ABORT); 1286 bnx2fc_scsi_done(io_req, DID_ABORT);
1260 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1287 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1261 } 1288 }
1262 1289done:
1263 /* release the reference taken in eh_abort */ 1290 /* release the reference taken in eh_abort */
1264 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1291 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1292out:
1265 spin_unlock_bh(&tgt->tgt_lock); 1293 spin_unlock_bh(&tgt->tgt_lock);
1266 return rc; 1294 return rc;
1267} 1295}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c1800b531270..082a25c3117e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
185 BUG_ON(rc); 185 BUG_ON(rc);
186 } 186 }
187 187
188 list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
189 i++;
190 io_req = (struct bnx2fc_cmd *)list;
191 list_del_init(&io_req->link);
192 io_req->on_tmf_queue = 0;
193 BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
194 if (io_req->wait_for_comp)
195 complete(&io_req->tm_done);
196 }
197
188 list_for_each_safe(list, tmp, &tgt->els_queue) { 198 list_for_each_safe(list, tmp, &tgt->els_queue) {
189 i++; 199 i++;
190 io_req = (struct bnx2fc_cmd *)list; 200 io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
213 223
214 BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); 224 BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
215 225
216 if (cancel_delayed_work(&io_req->timeout_work)) 226 if (cancel_delayed_work(&io_req->timeout_work)) {
227 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
228 &io_req->req_flags)) {
229 /* Handle eh_abort timeout */
230 BNX2FC_IO_DBG(io_req, "eh_abort for IO "
231 "in retire_q\n");
232 if (io_req->wait_for_comp)
233 complete(&io_req->tm_done);
234 }
217 kref_put(&io_req->refcount, bnx2fc_cmd_release); 235 kref_put(&io_req->refcount, bnx2fc_cmd_release);
236 }
218 237
219 clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); 238 clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
220 } 239 }
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index f6d37d0271f7..aed0f5db3668 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_FCOE) += fcoe.o 1obj-$(CONFIG_FCOE) += fcoe.o
2obj-$(CONFIG_LIBFCOE) += libfcoe.o 2obj-$(CONFIG_LIBFCOE) += libfcoe.o
3 3
4libfcoe-objs := fcoe_ctlr.o fcoe_transport.o 4libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 76e3d0b5bfa6..fe30b1b65e1d 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -41,6 +41,7 @@
41 41
42#include <scsi/fc/fc_encaps.h> 42#include <scsi/fc/fc_encaps.h>
43#include <scsi/fc/fc_fip.h> 43#include <scsi/fc/fc_fip.h>
44#include <scsi/fc/fc_fcoe.h>
44 45
45#include <scsi/libfc.h> 46#include <scsi/libfc.h>
46#include <scsi/fc_frame.h> 47#include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
150static int fcoe_vport_disable(struct fc_vport *, bool disable); 151static int fcoe_vport_disable(struct fc_vport *, bool disable);
151static void fcoe_set_vport_symbolic_name(struct fc_vport *); 152static void fcoe_set_vport_symbolic_name(struct fc_vport *);
152static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); 153static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
154static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
155static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
156
157static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
158 .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
159 .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
160 .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
161 .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
162 .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
163 .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
164 .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
165
166 .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
167 .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
168};
153 169
154static struct libfc_function_template fcoe_libfc_fcn_templ = { 170static struct libfc_function_template fcoe_libfc_fcn_templ = {
155 .frame_send = fcoe_xmit, 171 .frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
282static int fcoe_interface_setup(struct fcoe_interface *fcoe, 298static int fcoe_interface_setup(struct fcoe_interface *fcoe,
283 struct net_device *netdev) 299 struct net_device *netdev)
284{ 300{
285 struct fcoe_ctlr *fip = &fcoe->ctlr; 301 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
286 struct netdev_hw_addr *ha; 302 struct netdev_hw_addr *ha;
287 struct net_device *real_dev; 303 struct net_device *real_dev;
288 u8 flogi_maddr[ETH_ALEN]; 304 u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
366static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, 382static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
367 enum fip_state fip_mode) 383 enum fip_state fip_mode)
368{ 384{
385 struct fcoe_ctlr_device *ctlr_dev;
386 struct fcoe_ctlr *ctlr;
369 struct fcoe_interface *fcoe; 387 struct fcoe_interface *fcoe;
388 int size;
370 int err; 389 int err;
371 390
372 if (!try_module_get(THIS_MODULE)) { 391 if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
376 goto out; 395 goto out;
377 } 396 }
378 397
379 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); 398 size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
380 if (!fcoe) { 399 ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
381 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); 400 size);
401 if (!ctlr_dev) {
402 FCOE_DBG("Failed to add fcoe_ctlr_device\n");
382 fcoe = ERR_PTR(-ENOMEM); 403 fcoe = ERR_PTR(-ENOMEM);
383 goto out_putmod; 404 goto out_putmod;
384 } 405 }
385 406
407 ctlr = fcoe_ctlr_device_priv(ctlr_dev);
408 fcoe = fcoe_ctlr_priv(ctlr);
409
386 dev_hold(netdev); 410 dev_hold(netdev);
387 411
388 /* 412 /*
389 * Initialize FIP. 413 * Initialize FIP.
390 */ 414 */
391 fcoe_ctlr_init(&fcoe->ctlr, fip_mode); 415 fcoe_ctlr_init(ctlr, fip_mode);
392 fcoe->ctlr.send = fcoe_fip_send; 416 ctlr->send = fcoe_fip_send;
393 fcoe->ctlr.update_mac = fcoe_update_src_mac; 417 ctlr->update_mac = fcoe_update_src_mac;
394 fcoe->ctlr.get_src_addr = fcoe_get_src_mac; 418 ctlr->get_src_addr = fcoe_get_src_mac;
395 419
396 err = fcoe_interface_setup(fcoe, netdev); 420 err = fcoe_interface_setup(fcoe, netdev);
397 if (err) { 421 if (err) {
398 fcoe_ctlr_destroy(&fcoe->ctlr); 422 fcoe_ctlr_destroy(ctlr);
399 kfree(fcoe); 423 fcoe_ctlr_device_delete(ctlr_dev);
400 dev_put(netdev); 424 dev_put(netdev);
401 fcoe = ERR_PTR(err); 425 fcoe = ERR_PTR(err);
402 goto out_putmod; 426 goto out_putmod;
@@ -419,7 +443,7 @@ out:
419static void fcoe_interface_remove(struct fcoe_interface *fcoe) 443static void fcoe_interface_remove(struct fcoe_interface *fcoe)
420{ 444{
421 struct net_device *netdev = fcoe->netdev; 445 struct net_device *netdev = fcoe->netdev;
422 struct fcoe_ctlr *fip = &fcoe->ctlr; 446 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
423 u8 flogi_maddr[ETH_ALEN]; 447 u8 flogi_maddr[ETH_ALEN];
424 const struct net_device_ops *ops; 448 const struct net_device_ops *ops;
425 449
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
462static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) 486static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
463{ 487{
464 struct net_device *netdev = fcoe->netdev; 488 struct net_device *netdev = fcoe->netdev;
465 struct fcoe_ctlr *fip = &fcoe->ctlr; 489 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
490 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
466 491
467 rtnl_lock(); 492 rtnl_lock();
468 if (!fcoe->removed) 493 if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
472 /* Release the self-reference taken during fcoe_interface_create() */ 497 /* Release the self-reference taken during fcoe_interface_create() */
473 /* tear-down the FCoE controller */ 498 /* tear-down the FCoE controller */
474 fcoe_ctlr_destroy(fip); 499 fcoe_ctlr_destroy(fip);
475 scsi_host_put(fcoe->ctlr.lp->host); 500 scsi_host_put(fip->lp->host);
476 kfree(fcoe); 501 fcoe_ctlr_device_delete(ctlr_dev);
477 dev_put(netdev); 502 dev_put(netdev);
478 module_put(THIS_MODULE); 503 module_put(THIS_MODULE);
479} 504}
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
493 struct net_device *orig_dev) 518 struct net_device *orig_dev)
494{ 519{
495 struct fcoe_interface *fcoe; 520 struct fcoe_interface *fcoe;
521 struct fcoe_ctlr *ctlr;
496 522
497 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type); 523 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
498 fcoe_ctlr_recv(&fcoe->ctlr, skb); 524 ctlr = fcoe_to_ctlr(fcoe);
525 fcoe_ctlr_recv(ctlr, skb);
499 return 0; 526 return 0;
500} 527}
501 528
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
645 u32 mfs; 672 u32 mfs;
646 u64 wwnn, wwpn; 673 u64 wwnn, wwpn;
647 struct fcoe_interface *fcoe; 674 struct fcoe_interface *fcoe;
675 struct fcoe_ctlr *ctlr;
648 struct fcoe_port *port; 676 struct fcoe_port *port;
649 677
650 /* Setup lport private data to point to fcoe softc */ 678 /* Setup lport private data to point to fcoe softc */
651 port = lport_priv(lport); 679 port = lport_priv(lport);
652 fcoe = port->priv; 680 fcoe = port->priv;
681 ctlr = fcoe_to_ctlr(fcoe);
653 682
654 /* 683 /*
655 * Determine max frame size based on underlying device and optional 684 * Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
676 705
677 if (!lport->vport) { 706 if (!lport->vport) {
678 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 707 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
679 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 708 wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
680 fc_set_wwnn(lport, wwnn); 709 fc_set_wwnn(lport, wwnn);
681 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 710 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
682 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 711 wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
683 2, 0); 712 2, 0);
684 fc_set_wwpn(lport, wwpn); 713 fc_set_wwpn(lport, wwpn);
685 } 714 }
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
1056static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, 1085static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1057 struct device *parent, int npiv) 1086 struct device *parent, int npiv)
1058{ 1087{
1088 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
1059 struct net_device *netdev = fcoe->netdev; 1089 struct net_device *netdev = fcoe->netdev;
1060 struct fc_lport *lport, *n_port; 1090 struct fc_lport *lport, *n_port;
1061 struct fcoe_port *port; 1091 struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1119 } 1149 }
1120 1150
1121 /* Initialize the library */ 1151 /* Initialize the library */
1122 rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1); 1152 rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
1123 if (rc) { 1153 if (rc) {
1124 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " 1154 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
1125 "interface\n"); 1155 "interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1386{ 1416{
1387 struct fc_lport *lport; 1417 struct fc_lport *lport;
1388 struct fcoe_rcv_info *fr; 1418 struct fcoe_rcv_info *fr;
1419 struct fcoe_ctlr *ctlr;
1389 struct fcoe_interface *fcoe; 1420 struct fcoe_interface *fcoe;
1390 struct fc_frame_header *fh; 1421 struct fc_frame_header *fh;
1391 struct fcoe_percpu_s *fps; 1422 struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1393 unsigned int cpu; 1424 unsigned int cpu;
1394 1425
1395 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); 1426 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1396 lport = fcoe->ctlr.lp; 1427 ctlr = fcoe_to_ctlr(fcoe);
1428 lport = ctlr->lp;
1397 if (unlikely(!lport)) { 1429 if (unlikely(!lport)) {
1398 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); 1430 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1399 goto err2; 1431 goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1409 1441
1410 eh = eth_hdr(skb); 1442 eh = eth_hdr(skb);
1411 1443
1412 if (is_fip_mode(&fcoe->ctlr) && 1444 if (is_fip_mode(ctlr) &&
1413 compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) { 1445 compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
1414 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", 1446 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
1415 eh->h_source); 1447 eh->h_source);
1416 goto err; 1448 goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1544 unsigned int elen; /* eth header, may include vlan */ 1576 unsigned int elen; /* eth header, may include vlan */
1545 struct fcoe_port *port = lport_priv(lport); 1577 struct fcoe_port *port = lport_priv(lport);
1546 struct fcoe_interface *fcoe = port->priv; 1578 struct fcoe_interface *fcoe = port->priv;
1579 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
1547 u8 sof, eof; 1580 u8 sof, eof;
1548 struct fcoe_hdr *hp; 1581 struct fcoe_hdr *hp;
1549 1582
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1559 } 1592 }
1560 1593
1561 if (unlikely(fh->fh_type == FC_TYPE_ELS) && 1594 if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
1562 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb)) 1595 fcoe_ctlr_els_send(ctlr, lport, skb))
1563 return 0; 1596 return 0;
1564 1597
1565 sof = fr_sof(fp); 1598 sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1623 /* fill up mac and fcoe headers */ 1656 /* fill up mac and fcoe headers */
1624 eh = eth_hdr(skb); 1657 eh = eth_hdr(skb);
1625 eh->h_proto = htons(ETH_P_FCOE); 1658 eh->h_proto = htons(ETH_P_FCOE);
1626 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN); 1659 memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
1627 if (fcoe->ctlr.map_dest) 1660 if (ctlr->map_dest)
1628 memcpy(eh->h_dest + 3, fh->fh_d_id, 3); 1661 memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
1629 1662
1630 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 1663 if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
1631 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); 1664 memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
1632 else 1665 else
1633 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 1666 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1634 1667
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
1677static inline int fcoe_filter_frames(struct fc_lport *lport, 1710static inline int fcoe_filter_frames(struct fc_lport *lport,
1678 struct fc_frame *fp) 1711 struct fc_frame *fp)
1679{ 1712{
1713 struct fcoe_ctlr *ctlr;
1680 struct fcoe_interface *fcoe; 1714 struct fcoe_interface *fcoe;
1681 struct fc_frame_header *fh; 1715 struct fc_frame_header *fh;
1682 struct sk_buff *skb = (struct sk_buff *)fp; 1716 struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
1698 return 0; 1732 return 0;
1699 1733
1700 fcoe = ((struct fcoe_port *)lport_priv(lport))->priv; 1734 fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
1701 if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && 1735 ctlr = fcoe_to_ctlr(fcoe);
1736 if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
1702 ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { 1737 ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
1703 FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n"); 1738 FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
1704 return -EINVAL; 1739 return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1877 ulong event, void *ptr) 1912 ulong event, void *ptr)
1878{ 1913{
1879 struct dcb_app_type *entry = ptr; 1914 struct dcb_app_type *entry = ptr;
1915 struct fcoe_ctlr *ctlr;
1880 struct fcoe_interface *fcoe; 1916 struct fcoe_interface *fcoe;
1881 struct net_device *netdev; 1917 struct net_device *netdev;
1882 struct fcoe_port *port; 1918 struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1894 if (!fcoe) 1930 if (!fcoe)
1895 return NOTIFY_OK; 1931 return NOTIFY_OK;
1896 1932
1933 ctlr = fcoe_to_ctlr(fcoe);
1934
1897 if (entry->dcbx & DCB_CAP_DCBX_VER_CEE) 1935 if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
1898 prio = ffs(entry->app.priority) - 1; 1936 prio = ffs(entry->app.priority) - 1;
1899 else 1937 else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1904 1942
1905 if (entry->app.protocol == ETH_P_FIP || 1943 if (entry->app.protocol == ETH_P_FIP ||
1906 entry->app.protocol == ETH_P_FCOE) 1944 entry->app.protocol == ETH_P_FCOE)
1907 fcoe->ctlr.priority = prio; 1945 ctlr->priority = prio;
1908 1946
1909 if (entry->app.protocol == ETH_P_FCOE) { 1947 if (entry->app.protocol == ETH_P_FCOE) {
1910 port = lport_priv(fcoe->ctlr.lp); 1948 port = lport_priv(ctlr->lp);
1911 port->priority = prio; 1949 port->priority = prio;
1912 } 1950 }
1913 1951
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1929{ 1967{
1930 struct fc_lport *lport = NULL; 1968 struct fc_lport *lport = NULL;
1931 struct net_device *netdev = ptr; 1969 struct net_device *netdev = ptr;
1970 struct fcoe_ctlr *ctlr;
1932 struct fcoe_interface *fcoe; 1971 struct fcoe_interface *fcoe;
1933 struct fcoe_port *port; 1972 struct fcoe_port *port;
1934 struct fcoe_dev_stats *stats; 1973 struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1938 1977
1939 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 1978 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1940 if (fcoe->netdev == netdev) { 1979 if (fcoe->netdev == netdev) {
1941 lport = fcoe->ctlr.lp; 1980 ctlr = fcoe_to_ctlr(fcoe);
1981 lport = ctlr->lp;
1942 break; 1982 break;
1943 } 1983 }
1944 } 1984 }
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1967 break; 2007 break;
1968 case NETDEV_UNREGISTER: 2008 case NETDEV_UNREGISTER:
1969 list_del(&fcoe->list); 2009 list_del(&fcoe->list);
1970 port = lport_priv(fcoe->ctlr.lp); 2010 port = lport_priv(ctlr->lp);
1971 queue_work(fcoe_wq, &port->destroy_work); 2011 queue_work(fcoe_wq, &port->destroy_work);
1972 goto out; 2012 goto out;
1973 break; 2013 break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1982 fcoe_link_speed_update(lport); 2022 fcoe_link_speed_update(lport);
1983 2023
1984 if (link_possible && !fcoe_link_ok(lport)) 2024 if (link_possible && !fcoe_link_ok(lport))
1985 fcoe_ctlr_link_up(&fcoe->ctlr); 2025 fcoe_ctlr_link_up(ctlr);
1986 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { 2026 else if (fcoe_ctlr_link_down(ctlr)) {
1987 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 2027 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1988 stats->LinkFailureCount++; 2028 stats->LinkFailureCount++;
1989 put_cpu(); 2029 put_cpu();
@@ -2003,6 +2043,7 @@ out:
2003 */ 2043 */
2004static int fcoe_disable(struct net_device *netdev) 2044static int fcoe_disable(struct net_device *netdev)
2005{ 2045{
2046 struct fcoe_ctlr *ctlr;
2006 struct fcoe_interface *fcoe; 2047 struct fcoe_interface *fcoe;
2007 int rc = 0; 2048 int rc = 0;
2008 2049
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
2013 rtnl_unlock(); 2054 rtnl_unlock();
2014 2055
2015 if (fcoe) { 2056 if (fcoe) {
2016 fcoe_ctlr_link_down(&fcoe->ctlr); 2057 ctlr = fcoe_to_ctlr(fcoe);
2017 fcoe_clean_pending_queue(fcoe->ctlr.lp); 2058 fcoe_ctlr_link_down(ctlr);
2059 fcoe_clean_pending_queue(ctlr->lp);
2018 } else 2060 } else
2019 rc = -ENODEV; 2061 rc = -ENODEV;
2020 2062
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
2032 */ 2074 */
2033static int fcoe_enable(struct net_device *netdev) 2075static int fcoe_enable(struct net_device *netdev)
2034{ 2076{
2077 struct fcoe_ctlr *ctlr;
2035 struct fcoe_interface *fcoe; 2078 struct fcoe_interface *fcoe;
2036 int rc = 0; 2079 int rc = 0;
2037 2080
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
2040 fcoe = fcoe_hostlist_lookup_port(netdev); 2083 fcoe = fcoe_hostlist_lookup_port(netdev);
2041 rtnl_unlock(); 2084 rtnl_unlock();
2042 2085
2043 if (!fcoe) 2086 if (!fcoe) {
2044 rc = -ENODEV; 2087 rc = -ENODEV;
2045 else if (!fcoe_link_ok(fcoe->ctlr.lp)) 2088 goto out;
2046 fcoe_ctlr_link_up(&fcoe->ctlr); 2089 }
2090
2091 ctlr = fcoe_to_ctlr(fcoe);
2092
2093 if (!fcoe_link_ok(ctlr->lp))
2094 fcoe_ctlr_link_up(ctlr);
2047 2095
2096out:
2048 mutex_unlock(&fcoe_config_mutex); 2097 mutex_unlock(&fcoe_config_mutex);
2049 return rc; 2098 return rc;
2050} 2099}
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
2059 */ 2108 */
2060static int fcoe_destroy(struct net_device *netdev) 2109static int fcoe_destroy(struct net_device *netdev)
2061{ 2110{
2111 struct fcoe_ctlr *ctlr;
2062 struct fcoe_interface *fcoe; 2112 struct fcoe_interface *fcoe;
2063 struct fc_lport *lport; 2113 struct fc_lport *lport;
2064 struct fcoe_port *port; 2114 struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
2071 rc = -ENODEV; 2121 rc = -ENODEV;
2072 goto out_nodev; 2122 goto out_nodev;
2073 } 2123 }
2074 lport = fcoe->ctlr.lp; 2124 ctlr = fcoe_to_ctlr(fcoe);
2125 lport = ctlr->lp;
2075 port = lport_priv(lport); 2126 port = lport_priv(lport);
2076 list_del(&fcoe->list); 2127 list_del(&fcoe->list);
2077 queue_work(fcoe_wq, &port->destroy_work); 2128 queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2126 int dcbx; 2177 int dcbx;
2127 u8 fup, up; 2178 u8 fup, up;
2128 struct net_device *netdev = fcoe->realdev; 2179 struct net_device *netdev = fcoe->realdev;
2129 struct fcoe_port *port = lport_priv(fcoe->ctlr.lp); 2180 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2181 struct fcoe_port *port = lport_priv(ctlr->lp);
2130 struct dcb_app app = { 2182 struct dcb_app app = {
2131 .priority = 0, 2183 .priority = 0,
2132 .protocol = ETH_P_FCOE 2184 .protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2149 } 2201 }
2150 2202
2151 port->priority = ffs(up) ? ffs(up) - 1 : 0; 2203 port->priority = ffs(up) ? ffs(up) - 1 : 0;
2152 fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority; 2204 ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
2153 } 2205 }
2154#endif 2206#endif
2155} 2207}
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2166static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) 2218static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2167{ 2219{
2168 int rc = 0; 2220 int rc = 0;
2221 struct fcoe_ctlr_device *ctlr_dev;
2222 struct fcoe_ctlr *ctlr;
2169 struct fcoe_interface *fcoe; 2223 struct fcoe_interface *fcoe;
2170 struct fc_lport *lport; 2224 struct fc_lport *lport;
2171 2225
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2184 goto out_nodev; 2238 goto out_nodev;
2185 } 2239 }
2186 2240
2187 lport = fcoe_if_create(fcoe, &netdev->dev, 0); 2241 ctlr = fcoe_to_ctlr(fcoe);
2242 ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
2243 lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
2188 if (IS_ERR(lport)) { 2244 if (IS_ERR(lport)) {
2189 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2245 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2190 netdev->name); 2246 netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2195 } 2251 }
2196 2252
2197 /* Make this the "master" N_Port */ 2253 /* Make this the "master" N_Port */
2198 fcoe->ctlr.lp = lport; 2254 ctlr->lp = lport;
2199 2255
2200 /* setup DCB priority attributes. */ 2256 /* setup DCB priority attributes. */
2201 fcoe_dcb_create(fcoe); 2257 fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2208 fc_fabric_login(lport); 2264 fc_fabric_login(lport);
2209 if (!fcoe_link_ok(lport)) { 2265 if (!fcoe_link_ok(lport)) {
2210 rtnl_unlock(); 2266 rtnl_unlock();
2211 fcoe_ctlr_link_up(&fcoe->ctlr); 2267 fcoe_ctlr_link_up(ctlr);
2212 mutex_unlock(&fcoe_config_mutex); 2268 mutex_unlock(&fcoe_config_mutex);
2213 return rc; 2269 return rc;
2214 } 2270 }
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
2320 struct fc_lport *lport = shost_priv(shost); 2376 struct fc_lport *lport = shost_priv(shost);
2321 struct fcoe_port *port = lport_priv(lport); 2377 struct fcoe_port *port = lport_priv(lport);
2322 struct fcoe_interface *fcoe = port->priv; 2378 struct fcoe_interface *fcoe = port->priv;
2379 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2323 2380
2324 fcoe_ctlr_link_down(&fcoe->ctlr); 2381 fcoe_ctlr_link_down(ctlr);
2325 fcoe_clean_pending_queue(fcoe->ctlr.lp); 2382 fcoe_clean_pending_queue(ctlr->lp);
2326 if (!fcoe_link_ok(fcoe->ctlr.lp)) 2383 if (!fcoe_link_ok(ctlr->lp))
2327 fcoe_ctlr_link_up(&fcoe->ctlr); 2384 fcoe_ctlr_link_up(ctlr);
2328 return 0; 2385 return 0;
2329} 2386}
2330 2387
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
2359 */ 2416 */
2360static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) 2417static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2361{ 2418{
2419 struct fcoe_ctlr *ctlr;
2362 struct fcoe_interface *fcoe; 2420 struct fcoe_interface *fcoe;
2363 2421
2364 fcoe = fcoe_hostlist_lookup_port(netdev); 2422 fcoe = fcoe_hostlist_lookup_port(netdev);
2365 return (fcoe) ? fcoe->ctlr.lp : NULL; 2423 ctlr = fcoe_to_ctlr(fcoe);
2424 return (fcoe) ? ctlr->lp : NULL;
2366} 2425}
2367 2426
2368/** 2427/**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
2466static void __exit fcoe_exit(void) 2525static void __exit fcoe_exit(void)
2467{ 2526{
2468 struct fcoe_interface *fcoe, *tmp; 2527 struct fcoe_interface *fcoe, *tmp;
2528 struct fcoe_ctlr *ctlr;
2469 struct fcoe_port *port; 2529 struct fcoe_port *port;
2470 unsigned int cpu; 2530 unsigned int cpu;
2471 2531
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
2477 rtnl_lock(); 2537 rtnl_lock();
2478 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { 2538 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2479 list_del(&fcoe->list); 2539 list_del(&fcoe->list);
2480 port = lport_priv(fcoe->ctlr.lp); 2540 ctlr = fcoe_to_ctlr(fcoe);
2541 port = lport_priv(ctlr->lp);
2481 queue_work(fcoe_wq, &port->destroy_work); 2542 queue_work(fcoe_wq, &port->destroy_work);
2482 } 2543 }
2483 rtnl_unlock(); 2544 rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2573{ 2634{
2574 struct fcoe_port *port = lport_priv(lport); 2635 struct fcoe_port *port = lport_priv(lport);
2575 struct fcoe_interface *fcoe = port->priv; 2636 struct fcoe_interface *fcoe = port->priv;
2576 struct fcoe_ctlr *fip = &fcoe->ctlr; 2637 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
2577 struct fc_frame_header *fh = fc_frame_header_get(fp); 2638 struct fc_frame_header *fh = fc_frame_header_get(fp);
2578 2639
2579 switch (op) { 2640 switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
2730 __fcoe_get_lesb(lport, fc_lesb, netdev); 2791 __fcoe_get_lesb(lport, fc_lesb, netdev);
2731} 2792}
2732 2793
2794static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
2795{
2796 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
2797 struct net_device *netdev = fcoe_netdev(fip->lp);
2798 struct fcoe_fc_els_lesb *fcoe_lesb;
2799 struct fc_els_lesb fc_lesb;
2800
2801 __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
2802 fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
2803
2804 ctlr_dev->lesb.lesb_link_fail =
2805 ntohl(fcoe_lesb->lesb_link_fail);
2806 ctlr_dev->lesb.lesb_vlink_fail =
2807 ntohl(fcoe_lesb->lesb_vlink_fail);
2808 ctlr_dev->lesb.lesb_miss_fka =
2809 ntohl(fcoe_lesb->lesb_miss_fka);
2810 ctlr_dev->lesb.lesb_symb_err =
2811 ntohl(fcoe_lesb->lesb_symb_err);
2812 ctlr_dev->lesb.lesb_err_block =
2813 ntohl(fcoe_lesb->lesb_err_block);
2814 ctlr_dev->lesb.lesb_fcs_error =
2815 ntohl(fcoe_lesb->lesb_fcs_error);
2816}
2817
2818static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
2819{
2820 struct fcoe_ctlr_device *ctlr_dev =
2821 fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
2822 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
2823 struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
2824
2825 fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
2826}
2827
2733/** 2828/**
2734 * fcoe_set_port_id() - Callback from libfc when Port_ID is set. 2829 * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
2735 * @lport: the local port 2830 * @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
2747{ 2842{
2748 struct fcoe_port *port = lport_priv(lport); 2843 struct fcoe_port *port = lport_priv(lport);
2749 struct fcoe_interface *fcoe = port->priv; 2844 struct fcoe_interface *fcoe = port->priv;
2845 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2750 2846
2751 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) 2847 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2752 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); 2848 fcoe_ctlr_recv_flogi(ctlr, lport, fp);
2753} 2849}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 96ac938d39cc..a624add4f8ec 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -68,7 +68,6 @@ do { \
68 * @netdev: The associated net device 68 * @netdev: The associated net device
69 * @fcoe_packet_type: FCoE packet type 69 * @fcoe_packet_type: FCoE packet type
70 * @fip_packet_type: FIP packet type 70 * @fip_packet_type: FIP packet type
71 * @ctlr: The FCoE controller (for FIP)
72 * @oem: The offload exchange manager for all local port 71 * @oem: The offload exchange manager for all local port
73 * instances associated with this port 72 * instances associated with this port
74 * @removed: Indicates fcoe interface removed from net device 73 * @removed: Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
80 struct net_device *realdev; 79 struct net_device *realdev;
81 struct packet_type fcoe_packet_type; 80 struct packet_type fcoe_packet_type;
82 struct packet_type fip_packet_type; 81 struct packet_type fip_packet_type;
83 struct fcoe_ctlr ctlr;
84 struct fc_exch_mgr *oem; 82 struct fc_exch_mgr *oem;
85 u8 removed; 83 u8 removed;
86}; 84};
87 85
88#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) 86#define fcoe_to_ctlr(x) \
87 (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
88
89#define fcoe_from_ctlr(x) \
90 ((struct fcoe_interface *)((x) + 1))
89 91
90/** 92/**
91 * fcoe_netdev() - Return the net device associated with a local port 93 * fcoe_netdev() - Return the net device associated with a local port
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5a4c7250aa77..d68d57241ee6 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
160} 160}
161EXPORT_SYMBOL(fcoe_ctlr_init); 161EXPORT_SYMBOL(fcoe_ctlr_init);
162 162
163static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
164{
165 struct fcoe_ctlr *fip = new->fip;
166 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
167 struct fcoe_fcf_device temp, *fcf_dev;
168 int rc = 0;
169
170 LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
171 new->fabric_name, new->fcf_mac);
172
173 mutex_lock(&ctlr_dev->lock);
174
175 temp.fabric_name = new->fabric_name;
176 temp.switch_name = new->switch_name;
177 temp.fc_map = new->fc_map;
178 temp.vfid = new->vfid;
179 memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
180 temp.priority = new->pri;
181 temp.fka_period = new->fka_period;
182 temp.selected = 0; /* default to unselected */
183
184 fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
185 if (unlikely(!fcf_dev)) {
186 rc = -ENOMEM;
187 goto out;
188 }
189
190 /*
191 * The fcoe_sysfs layer can return a CONNECTED fcf that
192 * has a priv (fcf was never deleted) or a CONNECTED fcf
193 * that doesn't have a priv (fcf was deleted). However,
194 * libfcoe will always delete FCFs before trying to add
195 * them. This is ensured because both recv_adv and
196 * age_fcfs are protected by the the fcoe_ctlr's mutex.
197 * This means that we should never get a FCF with a
198 * non-NULL priv pointer.
199 */
200 BUG_ON(fcf_dev->priv);
201
202 fcf_dev->priv = new;
203 new->fcf_dev = fcf_dev;
204
205 list_add(&new->list, &fip->fcfs);
206 fip->fcf_count++;
207
208out:
209 mutex_unlock(&ctlr_dev->lock);
210 return rc;
211}
212
213static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
214{
215 struct fcoe_ctlr *fip = new->fip;
216 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
217 struct fcoe_fcf_device *fcf_dev;
218
219 list_del(&new->list);
220 fip->fcf_count--;
221
222 mutex_lock(&ctlr_dev->lock);
223
224 fcf_dev = fcoe_fcf_to_fcf_dev(new);
225 WARN_ON(!fcf_dev);
226 new->fcf_dev = NULL;
227 fcoe_fcf_device_delete(fcf_dev);
228 kfree(new);
229
230 mutex_unlock(&ctlr_dev->lock);
231}
232
163/** 233/**
164 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller 234 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
165 * @fip: The FCoE controller whose FCFs are to be reset 235 * @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
173 243
174 fip->sel_fcf = NULL; 244 fip->sel_fcf = NULL;
175 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 245 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
176 list_del(&fcf->list); 246 fcoe_sysfs_fcf_del(fcf);
177 kfree(fcf);
178 } 247 }
179 fip->fcf_count = 0; 248 WARN_ON(fip->fcf_count);
249
180 fip->sel_time = 0; 250 fip->sel_time = 0;
181} 251}
182 252
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
717 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); 787 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
718 unsigned long deadline; 788 unsigned long deadline;
719 unsigned long sel_time = 0; 789 unsigned long sel_time = 0;
790 struct list_head del_list;
720 struct fcoe_dev_stats *stats; 791 struct fcoe_dev_stats *stats;
721 792
793 INIT_LIST_HEAD(&del_list);
794
722 stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu()); 795 stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
723 796
724 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 797 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
739 if (time_after_eq(jiffies, deadline)) { 812 if (time_after_eq(jiffies, deadline)) {
740 if (fip->sel_fcf == fcf) 813 if (fip->sel_fcf == fcf)
741 fip->sel_fcf = NULL; 814 fip->sel_fcf = NULL;
815 /*
816 * Move to delete list so we can call
817 * fcoe_sysfs_fcf_del (which can sleep)
818 * after the put_cpu().
819 */
742 list_del(&fcf->list); 820 list_del(&fcf->list);
743 WARN_ON(!fip->fcf_count); 821 list_add(&fcf->list, &del_list);
744 fip->fcf_count--;
745 kfree(fcf);
746 stats->VLinkFailureCount++; 822 stats->VLinkFailureCount++;
747 } else { 823 } else {
748 if (time_after(next_timer, deadline)) 824 if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
753 } 829 }
754 } 830 }
755 put_cpu(); 831 put_cpu();
832
833 list_for_each_entry_safe(fcf, next, &del_list, list) {
834 /* Removes fcf from current list */
835 fcoe_sysfs_fcf_del(fcf);
836 }
837
756 if (sel_time && !fip->sel_fcf && !fip->sel_time) { 838 if (sel_time && !fip->sel_fcf && !fip->sel_time) {
757 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); 839 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
758 fip->sel_time = sel_time; 840 fip->sel_time = sel_time;
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
903{ 985{
904 struct fcoe_fcf *fcf; 986 struct fcoe_fcf *fcf;
905 struct fcoe_fcf new; 987 struct fcoe_fcf new;
906 struct fcoe_fcf *found;
907 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); 988 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
908 int first = 0; 989 int first = 0;
909 int mtu_valid; 990 int mtu_valid;
991 int found = 0;
992 int rc = 0;
910 993
911 if (fcoe_ctlr_parse_adv(fip, skb, &new)) 994 if (fcoe_ctlr_parse_adv(fip, skb, &new))
912 return; 995 return;
913 996
914 mutex_lock(&fip->ctlr_mutex); 997 mutex_lock(&fip->ctlr_mutex);
915 first = list_empty(&fip->fcfs); 998 first = list_empty(&fip->fcfs);
916 found = NULL;
917 list_for_each_entry(fcf, &fip->fcfs, list) { 999 list_for_each_entry(fcf, &fip->fcfs, list) {
918 if (fcf->switch_name == new.switch_name && 1000 if (fcf->switch_name == new.switch_name &&
919 fcf->fabric_name == new.fabric_name && 1001 fcf->fabric_name == new.fabric_name &&
920 fcf->fc_map == new.fc_map && 1002 fcf->fc_map == new.fc_map &&
921 compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { 1003 compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
922 found = fcf; 1004 found = 1;
923 break; 1005 break;
924 } 1006 }
925 } 1007 }
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
931 if (!fcf) 1013 if (!fcf)
932 goto out; 1014 goto out;
933 1015
934 fip->fcf_count++;
935 memcpy(fcf, &new, sizeof(new)); 1016 memcpy(fcf, &new, sizeof(new));
936 list_add(&fcf->list, &fip->fcfs); 1017 fcf->fip = fip;
1018 rc = fcoe_sysfs_fcf_add(fcf);
1019 if (rc) {
1020 printk(KERN_ERR "Failed to allocate sysfs instance "
1021 "for FCF, fab %16.16llx mac %pM\n",
1022 new.fabric_name, new.fcf_mac);
1023 kfree(fcf);
1024 goto out;
1025 }
937 } else { 1026 } else {
938 /* 1027 /*
939 * Update the FCF's keep-alive descriptor flags. 1028 * Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
954 fcf->fka_period = new.fka_period; 1043 fcf->fka_period = new.fka_period;
955 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); 1044 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
956 } 1045 }
1046
957 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 1047 mtu_valid = fcoe_ctlr_mtu_valid(fcf);
958 fcf->time = jiffies; 1048 fcf->time = jiffies;
959 if (!found) 1049 if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
996 time_before(fip->sel_time, fip->timer.expires)) 1086 time_before(fip->sel_time, fip->timer.expires))
997 mod_timer(&fip->timer, fip->sel_time); 1087 mod_timer(&fip->timer, fip->sel_time);
998 } 1088 }
1089
999out: 1090out:
1000 mutex_unlock(&fip->ctlr_mutex); 1091 mutex_unlock(&fip->ctlr_mutex);
1001} 1092}
@@ -2718,9 +2809,9 @@ unlock:
2718 2809
2719/** 2810/**
2720 * fcoe_libfc_config() - Sets up libfc related properties for local port 2811 * fcoe_libfc_config() - Sets up libfc related properties for local port
2721 * @lp: The local port to configure libfc for 2812 * @lport: The local port to configure libfc for
2722 * @fip: The FCoE controller in use by the local port 2813 * @fip: The FCoE controller in use by the local port
2723 * @tt: The libfc function template 2814 * @tt: The libfc function template
2724 * @init_fcp: If non-zero, the FCP portion of libfc should be initialized 2815 * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
2725 * 2816 *
2726 * Returns : 0 for success 2817 * Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
2753 return 0; 2844 return 0;
2754} 2845}
2755EXPORT_SYMBOL_GPL(fcoe_libfc_config); 2846EXPORT_SYMBOL_GPL(fcoe_libfc_config);
2847
2848void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
2849{
2850 struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
2851 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
2852 struct fcoe_fcf *fcf;
2853
2854 mutex_lock(&fip->ctlr_mutex);
2855 mutex_lock(&ctlr_dev->lock);
2856
2857 fcf = fcoe_fcf_device_priv(fcf_dev);
2858 if (fcf)
2859 fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
2860 else
2861 fcf_dev->selected = 0;
2862
2863 mutex_unlock(&ctlr_dev->lock);
2864 mutex_unlock(&fip->ctlr_mutex);
2865}
2866EXPORT_SYMBOL(fcoe_fcf_get_selected);
2867
2868void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
2869{
2870 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
2871
2872 mutex_lock(&ctlr->ctlr_mutex);
2873 switch (ctlr->mode) {
2874 case FIP_MODE_FABRIC:
2875 ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
2876 break;
2877 case FIP_MODE_VN2VN:
2878 ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
2879 break;
2880 default:
2881 ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
2882 break;
2883 }
2884 mutex_unlock(&ctlr->ctlr_mutex);
2885}
2886EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644
index 000000000000..2bc163198d33
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -0,0 +1,832 @@
1/*
2 * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/etherdevice.h>
24
25#include <scsi/fcoe_sysfs.h>
26
27static atomic_t ctlr_num;
28static atomic_t fcf_num;
29
30/*
31 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
32 * should insulate the loss of a fcf.
33 */
34static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */
35
36module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
37 uint, S_IRUGO|S_IWUSR);
38MODULE_PARM_DESC(fcf_dev_loss_tmo,
39 "Maximum number of seconds that libfcoe should"
40 " insulate the loss of a fcf. Once this value is"
41 " exceeded, the fcf is removed.");
42
43/*
44 * These are used by the fcoe_*_show_function routines, they
45 * are intentionally placed in the .c file as they're not intended
46 * for use throughout the code.
47 */
48#define fcoe_ctlr_id(x) \
49 ((x)->id)
50#define fcoe_ctlr_work_q_name(x) \
51 ((x)->work_q_name)
52#define fcoe_ctlr_work_q(x) \
53 ((x)->work_q)
54#define fcoe_ctlr_devloss_work_q_name(x) \
55 ((x)->devloss_work_q_name)
56#define fcoe_ctlr_devloss_work_q(x) \
57 ((x)->devloss_work_q)
58#define fcoe_ctlr_mode(x) \
59 ((x)->mode)
60#define fcoe_ctlr_fcf_dev_loss_tmo(x) \
61 ((x)->fcf_dev_loss_tmo)
62#define fcoe_ctlr_link_fail(x) \
63 ((x)->lesb.lesb_link_fail)
64#define fcoe_ctlr_vlink_fail(x) \
65 ((x)->lesb.lesb_vlink_fail)
66#define fcoe_ctlr_miss_fka(x) \
67 ((x)->lesb.lesb_miss_fka)
68#define fcoe_ctlr_symb_err(x) \
69 ((x)->lesb.lesb_symb_err)
70#define fcoe_ctlr_err_block(x) \
71 ((x)->lesb.lesb_err_block)
72#define fcoe_ctlr_fcs_error(x) \
73 ((x)->lesb.lesb_fcs_error)
74#define fcoe_fcf_state(x) \
75 ((x)->state)
76#define fcoe_fcf_fabric_name(x) \
77 ((x)->fabric_name)
78#define fcoe_fcf_switch_name(x) \
79 ((x)->switch_name)
80#define fcoe_fcf_fc_map(x) \
81 ((x)->fc_map)
82#define fcoe_fcf_vfid(x) \
83 ((x)->vfid)
84#define fcoe_fcf_mac(x) \
85 ((x)->mac)
86#define fcoe_fcf_priority(x) \
87 ((x)->priority)
88#define fcoe_fcf_fka_period(x) \
89 ((x)->fka_period)
90#define fcoe_fcf_dev_loss_tmo(x) \
91 ((x)->dev_loss_tmo)
92#define fcoe_fcf_selected(x) \
93 ((x)->selected)
94#define fcoe_fcf_vlan_id(x) \
95 ((x)->vlan_id)
96
97/*
98 * dev_loss_tmo attribute
99 */
100static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
101{
102 int ret;
103
104 ret = kstrtoul(buf, 0, val);
105 if (ret || *val < 0)
106 return -EINVAL;
107 /*
108 * Check for overflow; dev_loss_tmo is u32
109 */
110 if (*val > UINT_MAX)
111 return -EINVAL;
112
113 return 0;
114}
115
116static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
117 unsigned long val)
118{
119 if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
120 (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
121 (fcf->state == FCOE_FCF_STATE_DELETED))
122 return -EBUSY;
123 /*
124 * Check for overflow; dev_loss_tmo is u32
125 */
126 if (val > UINT_MAX)
127 return -EINVAL;
128
129 fcoe_fcf_dev_loss_tmo(fcf) = val;
130 return 0;
131}
132
133#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
134struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
135 __ATTR(_name, _mode, _show, _store)
136
137#define fcoe_ctlr_show_function(field, format_string, sz, cast) \
138static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
139 struct device_attribute *attr, \
140 char *buf) \
141{ \
142 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
143 if (ctlr->f->get_fcoe_ctlr_##field) \
144 ctlr->f->get_fcoe_ctlr_##field(ctlr); \
145 return snprintf(buf, sz, format_string, \
146 cast fcoe_ctlr_##field(ctlr)); \
147}
148
149#define fcoe_fcf_show_function(field, format_string, sz, cast) \
150static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
151 struct device_attribute *attr, \
152 char *buf) \
153{ \
154 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
155 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
156 if (ctlr->f->get_fcoe_fcf_##field) \
157 ctlr->f->get_fcoe_fcf_##field(fcf); \
158 return snprintf(buf, sz, format_string, \
159 cast fcoe_fcf_##field(fcf)); \
160}
161
162#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
163static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
164 struct device_attribute *attr, \
165 char *buf) \
166{ \
167 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
168 return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
169}
170
171#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
172static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
173 struct device_attribute *attr, \
174 char *buf) \
175{ \
176 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
177 return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
178}
179
180#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \
181 fcoe_ctlr_private_show_function(field, format_string, sz, ) \
182 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
183 show_fcoe_ctlr_device_##field, NULL)
184
185#define fcoe_ctlr_rd_attr(field, format_string, sz) \
186 fcoe_ctlr_show_function(field, format_string, sz, ) \
187 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
188 show_fcoe_ctlr_device_##field, NULL)
189
190#define fcoe_fcf_rd_attr(field, format_string, sz) \
191 fcoe_fcf_show_function(field, format_string, sz, ) \
192 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
193 show_fcoe_fcf_device_##field, NULL)
194
195#define fcoe_fcf_private_rd_attr(field, format_string, sz) \
196 fcoe_fcf_private_show_function(field, format_string, sz, ) \
197 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
198 show_fcoe_fcf_device_##field, NULL)
199
200#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
201 fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
202 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
203 show_fcoe_ctlr_device_##field, NULL)
204
205#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
206 fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
207 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
208 show_fcoe_fcf_device_##field, NULL)
209
210#define fcoe_enum_name_search(title, table_type, table) \
211static const char *get_fcoe_##title##_name(enum table_type table_key) \
212{ \
213 int i; \
214 char *name = NULL; \
215 \
216 for (i = 0; i < ARRAY_SIZE(table); i++) { \
217 if (table[i].value == table_key) { \
218 name = table[i].name; \
219 break; \
220 } \
221 } \
222 return name; \
223}
224
225static struct {
226 enum fcf_state value;
227 char *name;
228} fcf_state_names[] = {
229 { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
230 { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
231 { FCOE_FCF_STATE_CONNECTED, "Connected" },
232};
233fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
234#define FCOE_FCF_STATE_MAX_NAMELEN 50
235
236static ssize_t show_fcf_state(struct device *dev,
237 struct device_attribute *attr,
238 char *buf)
239{
240 struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
241 const char *name;
242 name = get_fcoe_fcf_state_name(fcf->state);
243 if (!name)
244 return -EINVAL;
245 return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
246}
247static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
248
249static struct {
250 enum fip_conn_type value;
251 char *name;
252} fip_conn_type_names[] = {
253 { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
254 { FIP_CONN_TYPE_FABRIC, "Fabric" },
255 { FIP_CONN_TYPE_VN2VN, "VN2VN" },
256};
257fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
258#define FCOE_CTLR_MODE_MAX_NAMELEN 50
259
260static ssize_t show_ctlr_mode(struct device *dev,
261 struct device_attribute *attr,
262 char *buf)
263{
264 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
265 const char *name;
266
267 if (ctlr->f->get_fcoe_ctlr_mode)
268 ctlr->f->get_fcoe_ctlr_mode(ctlr);
269
270 name = get_fcoe_ctlr_mode_name(ctlr->mode);
271 if (!name)
272 return -EINVAL;
273 return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
274 "%s\n", name);
275}
276static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
277 show_ctlr_mode, NULL);
278
279static ssize_t
280store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
281 struct device_attribute *attr,
282 const char *buf, size_t count)
283{
284 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
285 struct fcoe_fcf_device *fcf;
286 unsigned long val;
287 int rc;
288
289 rc = fcoe_str_to_dev_loss(buf, &val);
290 if (rc)
291 return rc;
292
293 fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
294 mutex_lock(&ctlr->lock);
295 list_for_each_entry(fcf, &ctlr->fcfs, peers)
296 fcoe_fcf_set_dev_loss_tmo(fcf, val);
297 mutex_unlock(&ctlr->lock);
298 return count;
299}
300fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
301static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
302 show_fcoe_ctlr_device_fcf_dev_loss_tmo,
303 store_private_fcoe_ctlr_fcf_dev_loss_tmo);
304
305/* Link Error Status Block (LESB) */
306fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
307fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
308fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
309fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
310fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
311fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
312
313fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
314fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
315fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
316fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
317fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
318fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
319fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
320fcoe_fcf_rd_attr(selected, "%u\n", 20);
321fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
322
323fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
324static ssize_t
325store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
326 const char *buf, size_t count)
327{
328 struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
329 unsigned long val;
330 int rc;
331
332 rc = fcoe_str_to_dev_loss(buf, &val);
333 if (rc)
334 return rc;
335
336 rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
337 if (rc)
338 return rc;
339 return count;
340}
341static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
342 show_fcoe_fcf_device_dev_loss_tmo,
343 store_fcoe_fcf_dev_loss_tmo);
344
345static struct attribute *fcoe_ctlr_lesb_attrs[] = {
346 &device_attr_fcoe_ctlr_link_fail.attr,
347 &device_attr_fcoe_ctlr_vlink_fail.attr,
348 &device_attr_fcoe_ctlr_miss_fka.attr,
349 &device_attr_fcoe_ctlr_symb_err.attr,
350 &device_attr_fcoe_ctlr_err_block.attr,
351 &device_attr_fcoe_ctlr_fcs_error.attr,
352 NULL,
353};
354
355static struct attribute_group fcoe_ctlr_lesb_attr_group = {
356 .name = "lesb",
357 .attrs = fcoe_ctlr_lesb_attrs,
358};
359
360static struct attribute *fcoe_ctlr_attrs[] = {
361 &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
362 &device_attr_fcoe_ctlr_mode.attr,
363 NULL,
364};
365
366static struct attribute_group fcoe_ctlr_attr_group = {
367 .attrs = fcoe_ctlr_attrs,
368};
369
370static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
371 &fcoe_ctlr_attr_group,
372 &fcoe_ctlr_lesb_attr_group,
373 NULL,
374};
375
376static struct attribute *fcoe_fcf_attrs[] = {
377 &device_attr_fcoe_fcf_fabric_name.attr,
378 &device_attr_fcoe_fcf_switch_name.attr,
379 &device_attr_fcoe_fcf_dev_loss_tmo.attr,
380 &device_attr_fcoe_fcf_fc_map.attr,
381 &device_attr_fcoe_fcf_vfid.attr,
382 &device_attr_fcoe_fcf_mac.attr,
383 &device_attr_fcoe_fcf_priority.attr,
384 &device_attr_fcoe_fcf_fka_period.attr,
385 &device_attr_fcoe_fcf_state.attr,
386 &device_attr_fcoe_fcf_selected.attr,
387 &device_attr_fcoe_fcf_vlan_id.attr,
388 NULL
389};
390
391static struct attribute_group fcoe_fcf_attr_group = {
392 .attrs = fcoe_fcf_attrs,
393};
394
395static const struct attribute_group *fcoe_fcf_attr_groups[] = {
396 &fcoe_fcf_attr_group,
397 NULL,
398};
399
400struct bus_type fcoe_bus_type;
401
402static int fcoe_bus_match(struct device *dev,
403 struct device_driver *drv)
404{
405 if (dev->bus == &fcoe_bus_type)
406 return 1;
407 return 0;
408}
409
410/**
411 * fcoe_ctlr_device_release() - Release the FIP ctlr memory
412 * @dev: Pointer to the FIP ctlr's embedded device
413 *
414 * Called when the last FIP ctlr reference is released.
415 */
416static void fcoe_ctlr_device_release(struct device *dev)
417{
418 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
419 kfree(ctlr);
420}
421
422/**
423 * fcoe_fcf_device_release() - Release the FIP fcf memory
424 * @dev: Pointer to the fcf's embedded device
425 *
426 * Called when the last FIP fcf reference is released.
427 */
428static void fcoe_fcf_device_release(struct device *dev)
429{
430 struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
431 kfree(fcf);
432}
433
434struct device_type fcoe_ctlr_device_type = {
435 .name = "fcoe_ctlr",
436 .groups = fcoe_ctlr_attr_groups,
437 .release = fcoe_ctlr_device_release,
438};
439
440struct device_type fcoe_fcf_device_type = {
441 .name = "fcoe_fcf",
442 .groups = fcoe_fcf_attr_groups,
443 .release = fcoe_fcf_device_release,
444};
445
446struct bus_type fcoe_bus_type = {
447 .name = "fcoe",
448 .match = &fcoe_bus_match,
449};
450
451/**
452 * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
453 * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
454 */
455void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
456{
457 if (!fcoe_ctlr_work_q(ctlr)) {
458 printk(KERN_ERR
459 "ERROR: FIP Ctlr '%d' attempted to flush work, "
460 "when no workqueue created.\n", ctlr->id);
461 dump_stack();
462 return;
463 }
464
465 flush_workqueue(fcoe_ctlr_work_q(ctlr));
466}
467
468/**
469 * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
470 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
471 * @work: Work to queue for execution
472 *
473 * Return value:
474 * 1 on success / 0 already queued / < 0 for error
475 */
476int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
477 struct work_struct *work)
478{
479 if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
480 printk(KERN_ERR
481 "ERROR: FIP Ctlr '%d' attempted to queue work, "
482 "when no workqueue created.\n", ctlr->id);
483 dump_stack();
484
485 return -EINVAL;
486 }
487
488 return queue_work(fcoe_ctlr_work_q(ctlr), work);
489}
490
491/**
492 * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
493 * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
494 */
495void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
496{
497 if (!fcoe_ctlr_devloss_work_q(ctlr)) {
498 printk(KERN_ERR
499 "ERROR: FIP Ctlr '%d' attempted to flush work, "
500 "when no workqueue created.\n", ctlr->id);
501 dump_stack();
502 return;
503 }
504
505 flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
506}
507
508/**
509 * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
510 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
511 * @work: Work to queue for execution
512 * @delay: jiffies to delay the work queuing
513 *
514 * Return value:
515 * 1 on success / 0 already queued / < 0 for error
516 */
517int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
518 struct delayed_work *work,
519 unsigned long delay)
520{
521 if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
522 printk(KERN_ERR
523 "ERROR: FIP Ctlr '%d' attempted to queue work, "
524 "when no workqueue created.\n", ctlr->id);
525 dump_stack();
526
527 return -EINVAL;
528 }
529
530 return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
531}
532
533static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
534 struct fcoe_fcf_device *old)
535{
536 if (new->switch_name == old->switch_name &&
537 new->fabric_name == old->fabric_name &&
538 new->fc_map == old->fc_map &&
539 compare_ether_addr(new->mac, old->mac) == 0)
540 return 1;
541 return 0;
542}
543
544/**
545 * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
546 * @parent: The parent device to which the fcoe_ctlr instance
547 * should be attached
548 * @f: The LLD's FCoE sysfs function template pointer
549 * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
550 *
551 * This routine allocates a FIP ctlr object with some additional memory
552 * for the LLD. The FIP ctlr is initialized, added to sysfs and then
553 * attributes are added to it.
554 */
555struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
556 struct fcoe_sysfs_function_template *f,
557 int priv_size)
558{
559 struct fcoe_ctlr_device *ctlr;
560 int error = 0;
561
562 ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
563 GFP_KERNEL);
564 if (!ctlr)
565 goto out;
566
567 ctlr->id = atomic_inc_return(&ctlr_num) - 1;
568 ctlr->f = f;
569 INIT_LIST_HEAD(&ctlr->fcfs);
570 mutex_init(&ctlr->lock);
571 ctlr->dev.parent = parent;
572 ctlr->dev.bus = &fcoe_bus_type;
573 ctlr->dev.type = &fcoe_ctlr_device_type;
574
575 ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
576
577 snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
578 "ctlr_wq_%d", ctlr->id);
579 ctlr->work_q = create_singlethread_workqueue(
580 ctlr->work_q_name);
581 if (!ctlr->work_q)
582 goto out_del;
583
584 snprintf(ctlr->devloss_work_q_name,
585 sizeof(ctlr->devloss_work_q_name),
586 "ctlr_dl_wq_%d", ctlr->id);
587 ctlr->devloss_work_q = create_singlethread_workqueue(
588 ctlr->devloss_work_q_name);
589 if (!ctlr->devloss_work_q)
590 goto out_del_q;
591
592 dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
593 error = device_register(&ctlr->dev);
594 if (error)
595 goto out_del_q2;
596
597 return ctlr;
598
599out_del_q2:
600 destroy_workqueue(ctlr->devloss_work_q);
601 ctlr->devloss_work_q = NULL;
602out_del_q:
603 destroy_workqueue(ctlr->work_q);
604 ctlr->work_q = NULL;
605out_del:
606 kfree(ctlr);
607out:
608 return NULL;
609}
610EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
611
/**
 * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
 * @ctlr: A pointer to the ctlr to be deleted
 *
 * Deletes a FIP ctlr and any fcfs attached
 * to it. Deleting fcfs will cause their children
 * to be deleted as well.
 *
 * The ctlr is detached from sysfs and it's resources
 * are freed (work q), but the memory is not freed
 * until its last reference is released.
 *
 * This routine expects no locks to be held before
 * calling.
 *
 * TODO: Currently there are no callbacks to clean up LLD data
 * for a fcoe_fcf_device. LLDs must keep this in mind as they need
 * to clean up each of their LLD data for all fcoe_fcf_device before
 * calling fcoe_ctlr_device_delete.
 */
void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
{
	struct fcoe_fcf_device *fcf, *next;
	/* Remove any attached fcfs */
	mutex_lock(&ctlr->lock);
	/* Mark every fcf deleted and queue its final teardown; the
	 * list must be walked with the _safe variant since each entry
	 * is unlinked inside the loop. */
	list_for_each_entry_safe(fcf, next,
				 &ctlr->fcfs, peers) {
		list_del(&fcf->peers);
		fcf->state = FCOE_FCF_STATE_DELETED;
		fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
	}
	mutex_unlock(&ctlr->lock);

	/* Wait for all queued fcf deletions to finish before tearing
	 * down the workqueues they run on. */
	fcoe_ctlr_device_flush_work(ctlr);

	destroy_workqueue(ctlr->devloss_work_q);
	ctlr->devloss_work_q = NULL;
	destroy_workqueue(ctlr->work_q);
	ctlr->work_q = NULL;

	/* Drops the sysfs presence; memory is freed by the release
	 * callback once the last reference goes away. */
	device_unregister(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
655
/**
 * fcoe_fcf_device_final_delete() - Final delete routine
 * @work: The FIP fcf's embedded work struct
 *
 * It is expected that the fcf has been removed from
 * the FIP ctlr's list before calling this routine.
 */
static void fcoe_fcf_device_final_delete(struct work_struct *work)
{
	struct fcoe_fcf_device *fcf =
		container_of(work, struct fcoe_fcf_device, delete_work);
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);

	/*
	 * Cancel any outstanding timers. These should really exist
	 * only when rmmod'ing the LLDD and we're asking for
	 * immediate termination of the rports
	 */
	/* If the devloss timer already fired, flush its queue so the
	 * handler has finished before the fcf is unregistered. */
	if (!cancel_delayed_work(&fcf->dev_loss_work))
		fcoe_ctlr_device_flush_devloss(ctlr);

	device_unregister(&fcf->dev);
}
679
/**
 * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
 * @work: The FIP fcf's embedded work struct
 *
 * Removes the fcf from the FIP ctlr's list of fcfs and
 * queues the final deletion.
 */
static void fip_timeout_deleted_fcf(struct work_struct *work)
{
	struct fcoe_fcf_device *fcf =
		container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);

	/* ctlr->lock serializes state changes against
	 * fcoe_fcf_device_add()/fcoe_ctlr_device_delete(). */
	mutex_lock(&ctlr->lock);

	/*
	 * If the fcf is deleted or reconnected before the timer
	 * fires the devloss queue will be flushed, but the state will
	 * either be CONNECTED or DELETED. If that is the case we
	 * cancel deleting the fcf.
	 */
	if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
		goto out;

	dev_printk(KERN_ERR, &fcf->dev,
		   "FIP fcf connection time out: removing fcf\n");

	list_del(&fcf->peers);
	fcf->state = FCOE_FCF_STATE_DELETED;
	fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);

out:
	mutex_unlock(&ctlr->lock);
}
714
715/**
716 * fcoe_fcf_device_delete() - Delete a FIP fcf
717 * @fcf: Pointer to the fcf which is to be deleted
718 *
719 * Queues the FIP fcf on the devloss workqueue
720 *
721 * Expects the ctlr_attrs mutex to be held for fcf
722 * state change.
723 */
724void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
725{
726 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
727 int timeout = fcf->dev_loss_tmo;
728
729 if (fcf->state != FCOE_FCF_STATE_CONNECTED)
730 return;
731
732 fcf->state = FCOE_FCF_STATE_DISCONNECTED;
733
734 /*
735 * FCF will only be re-connected by the LLD calling
736 * fcoe_fcf_device_add, and it should be setting up
737 * priv then.
738 */
739 fcf->priv = NULL;
740
741 fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
742 timeout * HZ);
743}
744EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
745
746/**
747 * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
748 * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent
749 * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
750 *
751 * Expects to be called with the ctlr->lock held
752 */
753struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
754 struct fcoe_fcf_device *new_fcf)
755{
756 struct fcoe_fcf_device *fcf;
757 int error = 0;
758
759 list_for_each_entry(fcf, &ctlr->fcfs, peers) {
760 if (fcoe_fcf_device_match(new_fcf, fcf)) {
761 if (fcf->state == FCOE_FCF_STATE_CONNECTED)
762 return fcf;
763
764 fcf->state = FCOE_FCF_STATE_CONNECTED;
765
766 if (!cancel_delayed_work(&fcf->dev_loss_work))
767 fcoe_ctlr_device_flush_devloss(ctlr);
768
769 return fcf;
770 }
771 }
772
773 fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
774 if (unlikely(!fcf))
775 goto out;
776
777 INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
778 INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
779
780 fcf->dev.parent = &ctlr->dev;
781 fcf->dev.bus = &fcoe_bus_type;
782 fcf->dev.type = &fcoe_fcf_device_type;
783 fcf->id = atomic_inc_return(&fcf_num) - 1;
784 fcf->state = FCOE_FCF_STATE_UNKNOWN;
785
786 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
787
788 dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
789
790 fcf->fabric_name = new_fcf->fabric_name;
791 fcf->switch_name = new_fcf->switch_name;
792 fcf->fc_map = new_fcf->fc_map;
793 fcf->vfid = new_fcf->vfid;
794 memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
795 fcf->priority = new_fcf->priority;
796 fcf->fka_period = new_fcf->fka_period;
797 fcf->selected = new_fcf->selected;
798
799 error = device_register(&fcf->dev);
800 if (error)
801 goto out_del;
802
803 fcf->state = FCOE_FCF_STATE_CONNECTED;
804 list_add_tail(&fcf->peers, &ctlr->fcfs);
805
806 return fcf;
807
808out_del:
809 kfree(fcf);
810out:
811 return NULL;
812}
813EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
814
815int __init fcoe_sysfs_setup(void)
816{
817 int error;
818
819 atomic_set(&ctlr_num, 0);
820 atomic_set(&fcf_num, 0);
821
822 error = bus_register(&fcoe_bus_type);
823 if (error)
824 return error;
825
826 return 0;
827}
828
/* Module-exit hook: unregister the fcoe bus from sysfs. */
void __exit fcoe_sysfs_teardown(void)
{
	bus_unregister(&fcoe_bus_type);
}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 710e149d41b6..b46f43dced78 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -815,9 +815,17 @@ out_nodev:
815 */ 815 */
816static int __init libfcoe_init(void) 816static int __init libfcoe_init(void)
817{ 817{
818 fcoe_transport_init(); 818 int rc = 0;
819 819
820 return 0; 820 rc = fcoe_transport_init();
821 if (rc)
822 return rc;
823
824 rc = fcoe_sysfs_setup();
825 if (rc)
826 fcoe_transport_exit();
827
828 return rc;
821} 829}
822module_init(libfcoe_init); 830module_init(libfcoe_init);
823 831
@@ -826,6 +834,7 @@ module_init(libfcoe_init);
826 */ 834 */
827static void __exit libfcoe_exit(void) 835static void __exit libfcoe_exit(void)
828{ 836{
837 fcoe_sysfs_teardown();
829 fcoe_transport_exit(); 838 fcoe_transport_exit();
830} 839}
831module_exit(libfcoe_exit); 840module_exit(libfcoe_exit);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6102ef2cb2d8..9d46fcbe7755 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1792static inline u8 1792static inline u8
1793_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) 1793_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1794{ 1794{
1795 return ioc->cpu_msix_table[smp_processor_id()]; 1795 return ioc->cpu_msix_table[raw_smp_processor_id()];
1796} 1796}
1797 1797
1798/** 1798/**
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 6208d562890d..317a7fdc3b82 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,3 +25,12 @@ config SCSI_QLA_FC
25 Firmware images can be retrieved from: 25 Firmware images can be retrieved from:
26 26
27 ftp://ftp.qlogic.com/outgoing/linux/firmware/ 27 ftp://ftp.qlogic.com/outgoing/linux/firmware/
28
29config TCM_QLA2XXX
30 tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
31 depends on SCSI_QLA_FC && TARGET_CORE
32 select LIBFC
33 select BTREE
34 default n
35 ---help---
36 Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 5df782f4a097..dce7d788cdc9 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,5 +1,6 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
3 qla_nx.o 3 qla_nx.o qla_target.o
4 4
5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5926f5a87ea8..5ab953029f8d 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/kthread.h> 10#include <linux/kthread.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
576 scsi_block_requests(vha->host); 577 scsi_block_requests(vha->host);
577 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 578 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
578 if (IS_QLA82XX(ha)) { 579 if (IS_QLA82XX(ha)) {
580 ha->flags.isp82xx_no_md_cap = 1;
579 qla82xx_idc_lock(ha); 581 qla82xx_idc_lock(ha);
580 qla82xx_set_reset_owner(vha); 582 qla82xx_set_reset_owner(vha);
581 qla82xx_idc_unlock(ha); 583 qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
585 scsi_unblock_requests(vha->host); 587 scsi_unblock_requests(vha->host);
586 break; 588 break;
587 case 0x2025d: 589 case 0x2025d:
588 if (!IS_QLA81XX(ha)) 590 if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
589 return -EPERM; 591 return -EPERM;
590 592
591 ql_log(ql_log_info, vha, 0x706f, 593 ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
1105 struct device_attribute *attr, char *buf) 1107 struct device_attribute *attr, char *buf)
1106{ 1108{
1107 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1109 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1108 struct qla_hw_data *ha = vha->hw;
1109 return snprintf(buf, PAGE_SIZE, "%d\n", 1110 return snprintf(buf, PAGE_SIZE, "%d\n",
1110 ha->qla_stats.total_isp_aborts); 1111 vha->qla_stats.total_isp_aborts);
1111} 1112}
1112 1113
1113static ssize_t 1114static ssize_t
@@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1154 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1155 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1155 struct qla_hw_data *ha = vha->hw; 1156 struct qla_hw_data *ha = vha->hw;
1156 1157
1157 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1158 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1158 return snprintf(buf, PAGE_SIZE, "\n"); 1159 return snprintf(buf, PAGE_SIZE, "\n");
1159 1160
1160 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1161 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1537 dma_addr_t stats_dma; 1538 dma_addr_t stats_dma;
1538 struct fc_host_statistics *pfc_host_stat; 1539 struct fc_host_statistics *pfc_host_stat;
1539 1540
1540 pfc_host_stat = &ha->fc_host_stat; 1541 pfc_host_stat = &vha->fc_host_stat;
1541 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 1542 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1542 1543
1543 if (test_bit(UNLOADING, &vha->dpc_flags)) 1544 if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1580 pfc_host_stat->dumped_frames = stats->dumped_frames; 1581 pfc_host_stat->dumped_frames = stats->dumped_frames;
1581 pfc_host_stat->nos_count = stats->nos_rcvd; 1582 pfc_host_stat->nos_count = stats->nos_rcvd;
1582 } 1583 }
1583 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20; 1584 pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
1584 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20; 1585 pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
1585 1586
1586done_free: 1587done_free:
1587 dma_pool_free(ha->s_dma_pool, stats, stats_dma); 1588 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1737 fc_host_supported_speeds(vha->host) = 1738 fc_host_supported_speeds(vha->host) =
1738 fc_host_supported_speeds(base_vha->host); 1739 fc_host_supported_speeds(base_vha->host);
1739 1740
1741 qlt_vport_create(vha, ha);
1740 qla24xx_vport_disable(fc_vport, disable); 1742 qla24xx_vport_disable(fc_vport, disable);
1741 1743
1742 if (ha->flags.cpu_affinity_enabled) { 1744 if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1951 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; 1953 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1952 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1954 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1953 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1955 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1954 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 1956 fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
1957 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
1955 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; 1958 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1956 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; 1959 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1957 1960
1958 if (IS_CNA_CAPABLE(ha)) 1961 if (IS_CNA_CAPABLE(ha))
1959 speed = FC_PORTSPEED_10GBIT; 1962 speed = FC_PORTSPEED_10GBIT;
1963 else if (IS_QLA2031(ha))
1964 speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
1965 FC_PORTSPEED_4GBIT;
1960 else if (IS_QLA25XX(ha)) 1966 else if (IS_QLA25XX(ha))
1961 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 1967 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
1962 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 1968 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index bc3cc6d91117..c68883806c54 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
297 297
298 /* Initialize all required fields of fcport */ 298 /* Initialize all required fields of fcport */
299 fcport->vha = vha; 299 fcport->vha = vha;
300 fcport->vp_idx = vha->vp_idx;
301 fcport->d_id.b.al_pa = 300 fcport->d_id.b.al_pa =
302 bsg_job->request->rqst_data.h_els.port_id[0]; 301 bsg_job->request->rqst_data.h_els.port_id[0];
303 fcport->d_id.b.area = 302 fcport->d_id.b.area =
@@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
483 482
484 /* Initialize all required fields of fcport */ 483 /* Initialize all required fields of fcport */
485 fcport->vha = vha; 484 fcport->vha = vha;
486 fcport->vp_idx = vha->vp_idx;
487 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; 485 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
488 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; 486 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
489 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; 487 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
544 int rval = 0; 542 int rval = 0;
545 struct qla_hw_data *ha = vha->hw; 543 struct qla_hw_data *ha = vha->hw;
546 544
547 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 545 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
548 goto done_set_internal; 546 goto done_set_internal;
549 547
550 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 548 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
586 uint16_t new_config[4]; 584 uint16_t new_config[4];
587 struct qla_hw_data *ha = vha->hw; 585 struct qla_hw_data *ha = vha->hw;
588 586
589 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 587 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
590 goto done_reset_internal; 588 goto done_reset_internal;
591 589
592 memset(new_config, 0 , sizeof(new_config)); 590 memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
710 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 708 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
711 709
712 if ((ha->current_topology == ISP_CFG_F || 710 if ((ha->current_topology == ISP_CFG_F ||
713 (atomic_read(&vha->loop_state) == LOOP_DOWN) || 711 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
714 ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
715 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 712 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
716 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 713 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
717 elreq.options == EXTERNAL_LOOPBACK) { 714 elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1402 if (rval) 1399 if (rval)
1403 return rval; 1400 return rval;
1404 1401
1402 /* Set the isp82xx_no_md_cap not to capture minidump */
1403 ha->flags.isp82xx_no_md_cap = 1;
1404
1405 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1405 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1406 bsg_job->request_payload.sg_cnt, ha->optrom_buffer, 1406 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1407 ha->optrom_region_size); 1407 ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 62324a1d5573..fdee5611f3e2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,27 +11,31 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa |
15 * | Mailbox commands | 0x113e | 0x112c-0x112e | 15 * | Mailbox commands | 0x1140 | 0x111a-0x111b |
16 * | | | 0x112c-0x112e |
16 * | | | 0x113a | 17 * | | | 0x113a |
17 * | Device Discovery | 0x2086 | 0x2020-0x2022 | 18 * | Device Discovery | 0x2086 | 0x2020-0x2022 |
18 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | 19 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
19 * | | | 0x302d-0x302e | 20 * | | | 0x302d-0x302e |
20 * | DPC Thread | 0x401c | | 21 * | DPC Thread | 0x401c | 0x4002,0x4013 |
21 * | Async Events | 0x505d | 0x502b-0x502f | 22 * | Async Events | 0x505f | 0x502b-0x502f |
22 * | | | 0x5047,0x5052 | 23 * | | | 0x5047,0x5052 |
23 * | Timer Routines | 0x6011 | 0x600e-0x600f | 24 * | Timer Routines | 0x6011 | |
24 * | User Space Interactions | 0x709f | 0x7018,0x702e, | 25 * | User Space Interactions | 0x709f | 0x7018,0x702e, |
25 * | | | 0x7039,0x7045, | 26 * | | | 0x7039,0x7045, |
26 * | | | 0x7073-0x7075, | 27 * | | | 0x7073-0x7075, |
27 * | | | 0x708c | 28 * | | | 0x708c |
28 * | Task Management | 0x803c | 0x8025-0x8026 | 29 * | Task Management | 0x803c | 0x8025-0x8026 |
29 * | | | 0x800b,0x8039 | 30 * | | | 0x800b,0x8039 |
30 * | AER/EEH | 0x900f | | 31 * | AER/EEH | 0x9011 | |
31 * | Virtual Port | 0xa007 | | 32 * | Virtual Port | 0xa007 | |
32 * | ISP82XX Specific | 0xb054 | 0xb053 | 33 * | ISP82XX Specific | 0xb054 | 0xb024 |
33 * | MultiQ | 0xc00c | | 34 * | MultiQ | 0xc00c | |
34 * | Misc | 0xd010 | | 35 * | Misc | 0xd010 | |
36 * | Target Mode | 0xe06f | |
37 * | Target Mode Management | 0xf071 | |
38 * | Target Mode Task Management | 0x1000b | |
35 * ---------------------------------------------------------------------- 39 * ----------------------------------------------------------------------
36 */ 40 */
37 41
@@ -379,6 +383,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
379} 383}
380 384
381static inline void * 385static inline void *
386qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
387 uint32_t **last_chain)
388{
389 struct qla2xxx_mqueue_chain *q;
390 struct qla2xxx_mqueue_header *qh;
391 uint32_t num_queues;
392 int que;
393 struct {
394 int length;
395 void *ring;
396 } aq, *aqp;
397
398 if (!ha->tgt.atio_q_length)
399 return ptr;
400
401 num_queues = 1;
402 aqp = &aq;
403 aqp->length = ha->tgt.atio_q_length;
404 aqp->ring = ha->tgt.atio_ring;
405
406 for (que = 0; que < num_queues; que++) {
407 /* aqp = ha->atio_q_map[que]; */
408 q = ptr;
409 *last_chain = &q->type;
410 q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
411 q->chain_size = htonl(
412 sizeof(struct qla2xxx_mqueue_chain) +
413 sizeof(struct qla2xxx_mqueue_header) +
414 (aqp->length * sizeof(request_t)));
415 ptr += sizeof(struct qla2xxx_mqueue_chain);
416
417 /* Add header. */
418 qh = ptr;
419 qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
420 qh->number = htonl(que);
421 qh->size = htonl(aqp->length * sizeof(request_t));
422 ptr += sizeof(struct qla2xxx_mqueue_header);
423
424 /* Add data. */
425 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
426
427 ptr += aqp->length * sizeof(request_t);
428 }
429
430 return ptr;
431}
432
433static inline void *
382qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 434qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
383{ 435{
384 struct qla2xxx_mqueue_chain *q; 436 struct qla2xxx_mqueue_chain *q;
@@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
873 struct qla24xx_fw_dump *fw; 925 struct qla24xx_fw_dump *fw;
874 uint32_t ext_mem_cnt; 926 uint32_t ext_mem_cnt;
875 void *nxt; 927 void *nxt;
928 void *nxt_chain;
929 uint32_t *last_chain = NULL;
876 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 930 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
877 931
878 if (IS_QLA82XX(ha)) 932 if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1091 1145
1092 qla24xx_copy_eft(ha, nxt); 1146 qla24xx_copy_eft(ha, nxt);
1093 1147
1148 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1149 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1150 if (last_chain) {
1151 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1152 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1153 }
1154
1155 /* Adjust valid length. */
1156 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1157
1094qla24xx_fw_dump_failed_0: 1158qla24xx_fw_dump_failed_0:
1095 qla2xxx_dump_post_process(base_vha, rval); 1159 qla2xxx_dump_post_process(base_vha, rval);
1096 1160
@@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1399 /* Chain entries -- started with MQ. */ 1463 /* Chain entries -- started with MQ. */
1400 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1464 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1401 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1465 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1466 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1402 if (last_chain) { 1467 if (last_chain) {
1403 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1468 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1404 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1469 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1717 /* Chain entries -- started with MQ. */ 1782 /* Chain entries -- started with MQ. */
1718 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1783 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1719 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1784 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1785 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1720 if (last_chain) { 1786 if (last_chain) {
1721 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1787 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1722 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1788 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@ copy_queue:
2218 /* Chain entries -- started with MQ. */ 2284 /* Chain entries -- started with MQ. */
2219 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 2285 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2220 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 2286 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2287 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2221 if (last_chain) { 2288 if (last_chain) {
2222 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 2289 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
2223 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 2290 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2157bdf1569a..f278df8cce0f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
244 uint32_t queue; 244 uint32_t queue;
245#define TYPE_REQUEST_QUEUE 0x1 245#define TYPE_REQUEST_QUEUE 0x1
246#define TYPE_RESPONSE_QUEUE 0x2 246#define TYPE_RESPONSE_QUEUE 0x2
247#define TYPE_ATIO_QUEUE 0x3
247 uint32_t number; 248 uint32_t number;
248 uint32_t size; 249 uint32_t size;
249}; 250};
@@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
339#define ql_dbg_misc 0x00010000 /* For dumping everything that is not 340#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
340 * not covered by upper categories 341 * not covered by upper categories
341 */ 342 */
343#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
344 * This is to be used with other levels where
345 * more verbosity is required. It might not
346 * be applicable to all the levels.
347 */
348#define ql_dbg_tgt 0x00004000 /* Target mode */
349#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
350#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a2443031dbe7..39007f53aec0 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -186,6 +186,7 @@
186#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 186#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
187#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 187#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
188#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ 188#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
189#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
189 190
190struct req_que; 191struct req_que;
191 192
@@ -1234,11 +1235,27 @@ typedef struct {
1234 * ISP queue - response queue entry definition. 1235 * ISP queue - response queue entry definition.
1235 */ 1236 */
1236typedef struct { 1237typedef struct {
1237 uint8_t data[60]; 1238 uint8_t entry_type; /* Entry type. */
1239 uint8_t entry_count; /* Entry count. */
1240 uint8_t sys_define; /* System defined. */
1241 uint8_t entry_status; /* Entry Status. */
1242 uint32_t handle; /* System defined handle */
1243 uint8_t data[52];
1238 uint32_t signature; 1244 uint32_t signature;
1239#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ 1245#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
1240} response_t; 1246} response_t;
1241 1247
1248/*
1249 * ISP queue - ATIO queue entry definition.
1250 */
1251struct atio {
1252 uint8_t entry_type; /* Entry type. */
1253 uint8_t entry_count; /* Entry count. */
1254 uint8_t data[58];
1255 uint32_t signature;
1256#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
1257};
1258
1242typedef union { 1259typedef union {
1243 uint16_t extended; 1260 uint16_t extended;
1244 struct { 1261 struct {
@@ -1719,11 +1736,13 @@ typedef struct fc_port {
1719 struct fc_rport *rport, *drport; 1736 struct fc_rport *rport, *drport;
1720 u32 supported_classes; 1737 u32 supported_classes;
1721 1738
1722 uint16_t vp_idx;
1723 uint8_t fc4_type; 1739 uint8_t fc4_type;
1724 uint8_t scan_state; 1740 uint8_t scan_state;
1725} fc_port_t; 1741} fc_port_t;
1726 1742
1743#define QLA_FCPORT_SCAN_NONE 0
1744#define QLA_FCPORT_SCAN_FOUND 1
1745
1727/* 1746/*
1728 * Fibre channel port/lun states. 1747 * Fibre channel port/lun states.
1729 */ 1748 */
@@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
1747#define FCF_LOGIN_NEEDED BIT_1 1766#define FCF_LOGIN_NEEDED BIT_1
1748#define FCF_FCP2_DEVICE BIT_2 1767#define FCF_FCP2_DEVICE BIT_2
1749#define FCF_ASYNC_SENT BIT_3 1768#define FCF_ASYNC_SENT BIT_3
1769#define FCF_CONF_COMP_SUPPORTED BIT_4
1750 1770
1751/* No loop ID flag. */ 1771/* No loop ID flag. */
1752#define FC_NO_LOOP_ID 0x1000 1772#define FC_NO_LOOP_ID 0x1000
@@ -2419,6 +2439,40 @@ struct qlfc_fw {
2419 uint32_t len; 2439 uint32_t len;
2420}; 2440};
2421 2441
2442struct qlt_hw_data {
2443 /* Protected by hw lock */
2444 uint32_t enable_class_2:1;
2445 uint32_t enable_explicit_conf:1;
2446 uint32_t ini_mode_force_reverse:1;
2447 uint32_t node_name_set:1;
2448
2449 dma_addr_t atio_dma; /* Physical address. */
2450 struct atio *atio_ring; /* Base virtual address */
2451 struct atio *atio_ring_ptr; /* Current address. */
2452 uint16_t atio_ring_index; /* Current index. */
2453 uint16_t atio_q_length;
2454
2455 void *target_lport_ptr;
2456 struct qla_tgt_func_tmpl *tgt_ops;
2457 struct qla_tgt *qla_tgt;
2458 struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
2459 uint16_t current_handle;
2460
2461 struct qla_tgt_vp_map *tgt_vp_map;
2462 struct mutex tgt_mutex;
2463 struct mutex tgt_host_action_mutex;
2464
2465 int saved_set;
2466 uint16_t saved_exchange_count;
2467 uint32_t saved_firmware_options_1;
2468 uint32_t saved_firmware_options_2;
2469 uint32_t saved_firmware_options_3;
2470 uint8_t saved_firmware_options[2];
2471 uint8_t saved_add_firmware_options[2];
2472
2473 uint8_t tgt_node_name[WWN_SIZE];
2474};
2475
2422/* 2476/*
2423 * Qlogic host adapter specific data structure. 2477 * Qlogic host adapter specific data structure.
2424*/ 2478*/
@@ -2460,7 +2514,9 @@ struct qla_hw_data {
2460 uint32_t thermal_supported:1; 2514 uint32_t thermal_supported:1;
2461 uint32_t isp82xx_reset_hdlr_active:1; 2515 uint32_t isp82xx_reset_hdlr_active:1;
2462 uint32_t isp82xx_reset_owner:1; 2516 uint32_t isp82xx_reset_owner:1;
2463 /* 28 bits */ 2517 uint32_t isp82xx_no_md_cap:1;
2518 uint32_t host_shutting_down:1;
2519 /* 30 bits */
2464 } flags; 2520 } flags;
2465 2521
2466 /* This spinlock is used to protect "io transactions", you must 2522 /* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@ struct qla_hw_data {
2804 /* ISP2322: red, green, amber. */ 2860 /* ISP2322: red, green, amber. */
2805 uint16_t zio_mode; 2861 uint16_t zio_mode;
2806 uint16_t zio_timer; 2862 uint16_t zio_timer;
2807 struct fc_host_statistics fc_host_stat;
2808 2863
2809 struct qla_msix_entry *msix_entries; 2864 struct qla_msix_entry *msix_entries;
2810 2865
@@ -2817,7 +2872,6 @@ struct qla_hw_data {
2817 int cur_vport_count; 2872 int cur_vport_count;
2818 2873
2819 struct qla_chip_state_84xx *cs84xx; 2874 struct qla_chip_state_84xx *cs84xx;
2820 struct qla_statistics qla_stats;
2821 struct isp_operations *isp_ops; 2875 struct isp_operations *isp_ops;
2822 struct workqueue_struct *wq; 2876 struct workqueue_struct *wq;
2823 struct qlfc_fw fw_buf; 2877 struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@ struct qla_hw_data {
2863 dma_addr_t md_tmplt_hdr_dma; 2917 dma_addr_t md_tmplt_hdr_dma;
2864 void *md_dump; 2918 void *md_dump;
2865 uint32_t md_dump_size; 2919 uint32_t md_dump_size;
2920
2921 struct qlt_hw_data tgt;
2866}; 2922};
2867 2923
2868/* 2924/*
@@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
2920#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ 2976#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
2921#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ 2977#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
2922#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ 2978#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
2979#define SCR_PENDING 21 /* SCR in target mode */
2923 2980
2924 uint32_t device_flags; 2981 uint32_t device_flags;
2925#define SWITCH_FOUND BIT_0 2982#define SWITCH_FOUND BIT_0
@@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
2979 struct req_que *req; 3036 struct req_que *req;
2980 int fw_heartbeat_counter; 3037 int fw_heartbeat_counter;
2981 int seconds_since_last_heartbeat; 3038 int seconds_since_last_heartbeat;
3039 struct fc_host_statistics fc_host_stat;
3040 struct qla_statistics qla_stats;
2982 3041
2983 atomic_t vref_count; 3042 atomic_t vref_count;
2984} scsi_qla_host_t; 3043} scsi_qla_host_t;
2985 3044
3045#define SET_VP_IDX 1
3046#define SET_AL_PA 2
3047#define RESET_VP_IDX 3
3048#define RESET_AL_PA 4
3049struct qla_tgt_vp_map {
3050 uint8_t idx;
3051 scsi_qla_host_t *vha;
3052};
3053
2986/* 3054/*
2987 * Macros to help code, maintain, etc. 3055 * Macros to help code, maintain, etc.
2988 */ 3056 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9f065804bd12..9eacd2df111b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -175,6 +175,7 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
175/* 175/*
176 * Global Function Prototypes in qla_iocb.c source file. 176 * Global Function Prototypes in qla_iocb.c source file.
177 */ 177 */
178
178extern uint16_t qla2x00_calc_iocbs_32(uint16_t); 179extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
179extern uint16_t qla2x00_calc_iocbs_64(uint16_t); 180extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
180extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); 181extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
188extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
189extern int qla24xx_dif_start_scsi(srb_t *); 190extern int qla24xx_dif_start_scsi(srb_t *);
190 191
192extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
193extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
191 194
192/* 195/*
193 * Global Function Prototypes in qla_mbx.c source file. 196 * Global Function Prototypes in qla_mbx.c source file.
@@ -239,6 +242,9 @@ extern int
239qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); 242qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
240 243
241extern int 244extern int
245qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
246
247extern int
242qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); 248qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
243 249
244extern int 250extern int
@@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
383extern void qla2x00_free_irqs(scsi_qla_host_t *); 389extern void qla2x00_free_irqs(scsi_qla_host_t *);
384 390
385extern int qla2x00_get_data_rate(scsi_qla_host_t *); 391extern int qla2x00_get_data_rate(scsi_qla_host_t *);
392extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
393
386/* 394/*
387 * Global Function Prototypes in qla_sup.c source file. 395 * Global Function Prototypes in qla_sup.c source file.
388 */ 396 */
@@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
546extern void qla2x00_sp_timeout(unsigned long); 554extern void qla2x00_sp_timeout(unsigned long);
547extern void qla2x00_bsg_job_done(void *, void *, int); 555extern void qla2x00_bsg_job_done(void *, void *, int);
548extern void qla2x00_bsg_sp_free(void *, void *); 556extern void qla2x00_bsg_sp_free(void *, void *);
557extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
549 558
550/* Interrupt related */ 559/* Interrupt related */
551extern irqreturn_t qla82xx_intr_handler(int, void *); 560extern irqreturn_t qla82xx_intr_handler(int, void *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 3128f80441f5..05260d25fe46 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); 10static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
10static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); 11static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
556 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; 557 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
557 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; 558 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
558 559
559 ct_req->req.rff_id.fc4_feature = BIT_1; 560 qlt_rff_id(vha, ct_req);
561
560 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ 562 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
561 563
562 /* Execute MS IOCB */ 564 /* Execute MS IOCB */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b9465643396b..ca5084743135 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,6 +17,9 @@
17#include <asm/prom.h> 17#include <asm/prom.h>
18#endif 18#endif
19 19
20#include <target/target_core_base.h>
21#include "qla_target.h"
22
20/* 23/*
21* QLogic ISP2x00 Hardware Support Function Prototypes. 24* QLogic ISP2x00 Hardware Support Function Prototypes.
22*/ 25*/
@@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
518 return QLA_FUNCTION_FAILED; 521 return QLA_FUNCTION_FAILED;
519 } 522 }
520 } 523 }
521 rval = qla2x00_init_rings(vha); 524
525 if (qla_ini_mode_enabled(vha))
526 rval = qla2x00_init_rings(vha);
527
522 ha->flags.chip_reset_done = 1; 528 ha->flags.chip_reset_done = 1;
523 529
524 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { 530 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1233 mq_size += ha->max_rsp_queues * 1239 mq_size += ha->max_rsp_queues *
1234 (rsp->length * sizeof(response_t)); 1240 (rsp->length * sizeof(response_t));
1235 } 1241 }
1242 if (ha->tgt.atio_q_length)
1243 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
1236 /* Allocate memory for Fibre Channel Event Buffer. */ 1244 /* Allocate memory for Fibre Channel Event Buffer. */
1237 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1245 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1238 goto try_eft; 1246 goto try_eft;
@@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1696 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 1704 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1697 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 1705 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1698 1706
1707 /* Setup ATIO queue dma pointers for target mode */
1708 icb->atio_q_inpointer = __constant_cpu_to_le16(0);
1709 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
1710 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
1711 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
1712
1699 if (ha->mqenable || IS_QLA83XX(ha)) { 1713 if (ha->mqenable || IS_QLA83XX(ha)) {
1700 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); 1714 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1701 icb->rid = __constant_cpu_to_le16(rid); 1715 icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1739 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0); 1753 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1740 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0); 1754 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1741 } 1755 }
1756 qlt_24xx_config_rings(vha, reg);
1757
1742 /* PCI posting */ 1758 /* PCI posting */
1743 RD_REG_DWORD(&ioreg->hccr); 1759 RD_REG_DWORD(&ioreg->hccr);
1744} 1760}
@@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1794 1810
1795 spin_unlock(&ha->vport_slock); 1811 spin_unlock(&ha->vport_slock);
1796 1812
1813 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
1814 ha->tgt.atio_ring_index = 0;
1815 /* Initialize ATIO queue entries */
1816 qlt_init_atio_q_entries(vha);
1817
1797 ha->isp_ops->config_rings(vha); 1818 ha->isp_ops->config_rings(vha);
1798 1819
1799 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1820 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2051 vha->d_id.b.area = area; 2072 vha->d_id.b.area = area;
2052 vha->d_id.b.al_pa = al_pa; 2073 vha->d_id.b.al_pa = al_pa;
2053 2074
2075 spin_lock(&ha->vport_slock);
2076 qlt_update_vp_map(vha, SET_AL_PA);
2077 spin_unlock(&ha->vport_slock);
2078
2054 if (!vha->flags.init_done) 2079 if (!vha->flags.init_done)
2055 ql_log(ql_log_info, vha, 0x2010, 2080 ql_log(ql_log_info, vha, 0x2010,
2056 "Topology - %s, Host Loop address 0x%x.\n", 2081 "Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2185 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 2210 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2186 /* Reset NVRAM data. */ 2211 /* Reset NVRAM data. */
2187 ql_log(ql_log_warn, vha, 0x0064, 2212 ql_log(ql_log_warn, vha, 0x0064,
2188 "Inconisistent NVRAM " 2213 "Inconsistent NVRAM "
2189 "detected: checksum=0x%x id=%c version=0x%x.\n", 2214 "detected: checksum=0x%x id=%c version=0x%x.\n",
2190 chksum, nv->id[0], nv->nvram_version); 2215 chksum, nv->id[0], nv->nvram_version);
2191 ql_log(ql_log_warn, vha, 0x0065, 2216 ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2270 if (IS_QLA23XX(ha)) { 2295 if (IS_QLA23XX(ha)) {
2271 nv->firmware_options[0] |= BIT_2; 2296 nv->firmware_options[0] |= BIT_2;
2272 nv->firmware_options[0] &= ~BIT_3; 2297 nv->firmware_options[0] &= ~BIT_3;
2273 nv->firmware_options[0] &= ~BIT_6; 2298 nv->special_options[0] &= ~BIT_6;
2274 nv->add_firmware_options[1] |= BIT_5 | BIT_4; 2299 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2275 2300
2276 if (IS_QLA2300(ha)) { 2301 if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
2467{ 2492{
2468 fc_port_t *fcport = data; 2493 fc_port_t *fcport = data;
2469 struct fc_rport *rport; 2494 struct fc_rport *rport;
2495 scsi_qla_host_t *vha = fcport->vha;
2470 unsigned long flags; 2496 unsigned long flags;
2471 2497
2472 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2498 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2473 rport = fcport->drport ? fcport->drport: fcport->rport; 2499 rport = fcport->drport ? fcport->drport: fcport->rport;
2474 fcport->drport = NULL; 2500 fcport->drport = NULL;
2475 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2501 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2476 if (rport) 2502 if (rport) {
2477 fc_remote_port_delete(rport); 2503 fc_remote_port_delete(rport);
2504 /*
2505 * Release the target mode FC NEXUS in qla_target.c code
2506 * if target mod is enabled.
2507 */
2508 qlt_fc_port_deleted(vha, fcport);
2509 }
2478} 2510}
2479 2511
2480/** 2512/**
@@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2495 2527
2496 /* Setup fcport template structure. */ 2528 /* Setup fcport template structure. */
2497 fcport->vha = vha; 2529 fcport->vha = vha;
2498 fcport->vp_idx = vha->vp_idx;
2499 fcport->port_type = FCT_UNKNOWN; 2530 fcport->port_type = FCT_UNKNOWN;
2500 fcport->loop_id = FC_NO_LOOP_ID; 2531 fcport->loop_id = FC_NO_LOOP_ID;
2501 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 2532 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2502 fcport->supported_classes = FC_COS_UNSPECIFIED; 2533 fcport->supported_classes = FC_COS_UNSPECIFIED;
2534 fcport->scan_state = QLA_FCPORT_SCAN_NONE;
2503 2535
2504 return fcport; 2536 return fcport;
2505} 2537}
@@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2726 new_fcport->d_id.b.area = area; 2758 new_fcport->d_id.b.area = area;
2727 new_fcport->d_id.b.al_pa = al_pa; 2759 new_fcport->d_id.b.al_pa = al_pa;
2728 new_fcport->loop_id = loop_id; 2760 new_fcport->loop_id = loop_id;
2729 new_fcport->vp_idx = vha->vp_idx;
2730 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2761 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2731 if (rval2 != QLA_SUCCESS) { 2762 if (rval2 != QLA_SUCCESS) {
2732 ql_dbg(ql_dbg_disc, vha, 0x201a, 2763 ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2760 2791
2761 if (!found) { 2792 if (!found) {
2762 /* New device, add to fcports list. */ 2793 /* New device, add to fcports list. */
2763 if (vha->vp_idx) {
2764 new_fcport->vha = vha;
2765 new_fcport->vp_idx = vha->vp_idx;
2766 }
2767 list_add_tail(&new_fcport->list, &vha->vp_fcports); 2794 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2768 2795
2769 /* Allocate a new replacement fcport. */ 2796 /* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@ cleanup_allocation:
2800static void 2827static void
2801qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2828qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2802{ 2829{
2803#define LS_UNKNOWN 2
2804 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2805 char *link_speed; 2830 char *link_speed;
2806 int rval; 2831 int rval;
2807 uint16_t mb[4]; 2832 uint16_t mb[4];
@@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2829 fcport->port_name[6], fcport->port_name[7], rval, 2854 fcport->port_name[6], fcport->port_name[7], rval,
2830 fcport->fp_speed, mb[0], mb[1]); 2855 fcport->fp_speed, mb[0], mb[1]);
2831 } else { 2856 } else {
2832 link_speed = link_speeds[LS_UNKNOWN]; 2857 link_speed = qla2x00_get_link_speed_str(ha);
2833 if (fcport->fp_speed < 5)
2834 link_speed = link_speeds[fcport->fp_speed];
2835 else if (fcport->fp_speed == 0x13)
2836 link_speed = link_speeds[5];
2837 ql_dbg(ql_dbg_disc, vha, 0x2005, 2858 ql_dbg(ql_dbg_disc, vha, 0x2005,
2838 "iIDMA adjusted to %s GB/s " 2859 "iIDMA adjusted to %s GB/s "
2839 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed, 2860 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2864 "Unable to allocate fc remote port.\n"); 2885 "Unable to allocate fc remote port.\n");
2865 return; 2886 return;
2866 } 2887 }
2888 /*
2889 * Create target mode FC NEXUS in qla_target.c if target mode is
2890 * enabled..
2891 */
2892 qlt_fc_port_added(vha, fcport);
2893
2867 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2894 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2868 *((fc_port_t **)rport->dd_data) = fcport; 2895 *((fc_port_t **)rport->dd_data) = fcport;
2869 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2896 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@ static int
2921qla2x00_configure_fabric(scsi_qla_host_t *vha) 2948qla2x00_configure_fabric(scsi_qla_host_t *vha)
2922{ 2949{
2923 int rval; 2950 int rval;
2924 fc_port_t *fcport, *fcptemp; 2951 fc_port_t *fcport;
2925 uint16_t next_loopid; 2952 uint16_t next_loopid;
2926 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2953 uint16_t mb[MAILBOX_REGISTER_COUNT];
2927 uint16_t loop_id; 2954 uint16_t loop_id;
@@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2959 0xfc, mb, BIT_1|BIT_0); 2986 0xfc, mb, BIT_1|BIT_0);
2960 if (rval != QLA_SUCCESS) { 2987 if (rval != QLA_SUCCESS) {
2961 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2988 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2962 return rval; 2989 break;
2963 } 2990 }
2964 if (mb[0] != MBS_COMMAND_COMPLETE) { 2991 if (mb[0] != MBS_COMMAND_COMPLETE) {
2965 ql_dbg(ql_dbg_disc, vha, 0x2042, 2992 ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2991 } 3018 }
2992 } 3019 }
2993 3020
2994#define QLA_FCPORT_SCAN 1
2995#define QLA_FCPORT_FOUND 2
2996
2997 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2998 fcport->scan_state = QLA_FCPORT_SCAN;
2999 }
3000
3001 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3021 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3002 if (rval != QLA_SUCCESS) 3022 if (rval != QLA_SUCCESS)
3003 break; 3023 break;
3004 3024
3005 /* 3025 /* Add new ports to existing port list */
3006 * Logout all previous fabric devices marked lost, except 3026 list_splice_tail_init(&new_fcports, &vha->vp_fcports);
3007 * FCP2 devices. 3027
3008 */ 3028 /* Starting free loop ID. */
3029 next_loopid = ha->min_external_loopid;
3030
3009 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3031 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3010 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3032 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3011 break; 3033 break;
@@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3013 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3035 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3014 continue; 3036 continue;
3015 3037
3016 if (fcport->scan_state == QLA_FCPORT_SCAN && 3038 /* Logout lost/gone fabric devices (non-FCP2) */
3039 if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
3017 atomic_read(&fcport->state) == FCS_ONLINE) { 3040 atomic_read(&fcport->state) == FCS_ONLINE) {
3018 qla2x00_mark_device_lost(vha, fcport, 3041 qla2x00_mark_device_lost(vha, fcport,
3019 ql2xplogiabsentdevice, 0); 3042 ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3026 fcport->d_id.b.domain, 3049 fcport->d_id.b.domain,
3027 fcport->d_id.b.area, 3050 fcport->d_id.b.area,
3028 fcport->d_id.b.al_pa); 3051 fcport->d_id.b.al_pa);
3029 fcport->loop_id = FC_NO_LOOP_ID;
3030 } 3052 }
3031 }
3032 }
3033
3034 /* Starting free loop ID. */
3035 next_loopid = ha->min_external_loopid;
3036
3037 /*
3038 * Scan through our port list and login entries that need to be
3039 * logged in.
3040 */
3041 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3042 if (atomic_read(&vha->loop_down_timer) ||
3043 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3044 break;
3045
3046 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3047 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3048 continue; 3053 continue;
3049
3050 if (fcport->loop_id == FC_NO_LOOP_ID) {
3051 fcport->loop_id = next_loopid;
3052 rval = qla2x00_find_new_loop_id(
3053 base_vha, fcport);
3054 if (rval != QLA_SUCCESS) {
3055 /* Ran out of IDs to use */
3056 break;
3057 }
3058 } 3054 }
3059 /* Login and update database */ 3055 fcport->scan_state = QLA_FCPORT_SCAN_NONE;
3060 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3056
3061 } 3057 /* Login fabric devices that need a login */
3062 3058 if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
3063 /* Exit if out of loop IDs. */ 3059 atomic_read(&vha->loop_down_timer) == 0) {
3064 if (rval != QLA_SUCCESS) { 3060 if (fcport->loop_id == FC_NO_LOOP_ID) {
3065 break; 3061 fcport->loop_id = next_loopid;
3066 } 3062 rval = qla2x00_find_new_loop_id(
3067 3063 base_vha, fcport);
3068 /* 3064 if (rval != QLA_SUCCESS) {
3069 * Login and add the new devices to our port list. 3065 /* Ran out of IDs to use */
3070 */ 3066 continue;
3071 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 3067 }
3072 if (atomic_read(&vha->loop_down_timer) || 3068 }
3073 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3074 break;
3075
3076 /* Find a new loop ID to use. */
3077 fcport->loop_id = next_loopid;
3078 rval = qla2x00_find_new_loop_id(base_vha, fcport);
3079 if (rval != QLA_SUCCESS) {
3080 /* Ran out of IDs to use */
3081 break;
3082 } 3069 }
3083 3070
3084 /* Login and update database */ 3071 /* Login and update database */
3085 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3072 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3086
3087 if (vha->vp_idx) {
3088 fcport->vha = vha;
3089 fcport->vp_idx = vha->vp_idx;
3090 }
3091 list_move_tail(&fcport->list, &vha->vp_fcports);
3092 } 3073 }
3093 } while (0); 3074 } while (0);
3094 3075
3095 /* Free all new device structures not processed. */
3096 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3097 list_del(&fcport->list);
3098 kfree(fcport);
3099 }
3100
3101 if (rval) { 3076 if (rval) {
3102 ql_dbg(ql_dbg_disc, vha, 0x2068, 3077 ql_dbg(ql_dbg_disc, vha, 0x2068,
3103 "Configure fabric error exit rval=%d.\n", rval); 3078 "Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3287 WWN_SIZE)) 3262 WWN_SIZE))
3288 continue; 3263 continue;
3289 3264
3290 fcport->scan_state = QLA_FCPORT_FOUND; 3265 fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
3291 3266
3292 found++; 3267 found++;
3293 3268
@@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3595 if (mb[10] & BIT_1) 3570 if (mb[10] & BIT_1)
3596 fcport->supported_classes |= FC_COS_CLASS3; 3571 fcport->supported_classes |= FC_COS_CLASS3;
3597 3572
3573 if (IS_FWI2_CAPABLE(ha)) {
3574 if (mb[10] & BIT_7)
3575 fcport->flags |=
3576 FCF_CONF_COMP_SUPPORTED;
3577 }
3578
3598 rval = QLA_SUCCESS; 3579 rval = QLA_SUCCESS;
3599 break; 3580 break;
3600 } else if (mb[0] == MBS_LOOP_ID_USED) { 3581 } else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3841 vha->flags.online = 0; 3822 vha->flags.online = 0;
3842 ha->flags.chip_reset_done = 0; 3823 ha->flags.chip_reset_done = 0;
3843 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3824 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3844 ha->qla_stats.total_isp_aborts++; 3825 vha->qla_stats.total_isp_aborts++;
3845 3826
3846 ql_log(ql_log_info, vha, 0x00af, 3827 ql_log(ql_log_info, vha, 0x00af,
3847 "Performing ISP error recovery - ha=%p.\n", ha); 3828 "Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4066 struct qla_hw_data *ha = vha->hw; 4047 struct qla_hw_data *ha = vha->hw;
4067 struct req_que *req = ha->req_q_map[0]; 4048 struct req_que *req = ha->req_q_map[0];
4068 struct rsp_que *rsp = ha->rsp_q_map[0]; 4049 struct rsp_que *rsp = ha->rsp_q_map[0];
4050 unsigned long flags;
4069 4051
4070 /* If firmware needs to be loaded */ 4052 /* If firmware needs to be loaded */
4071 if (qla2x00_isp_firmware(vha)) { 4053 if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4090 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4072 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4091 4073
4092 vha->flags.online = 1; 4074 vha->flags.online = 1;
4075
4076 /*
4077 * Process any ATIO queue entries that came in
4078 * while we weren't online.
4079 */
4080 spin_lock_irqsave(&ha->hardware_lock, flags);
4081 if (qla_tgt_mode_enabled(vha))
4082 qlt_24xx_process_atio_queue(vha);
4083 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4084
4093 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 4085 /* Wait at most MAX_TARGET RSCNs for a stable link. */
4094 wait_time = 256; 4086 wait_time = 256;
4095 do { 4087 do {
@@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4279 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4271 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4280 /* Reset NVRAM data. */ 4272 /* Reset NVRAM data. */
4281 ql_log(ql_log_warn, vha, 0x006b, 4273 ql_log(ql_log_warn, vha, 0x006b,
4282 "Inconisistent NVRAM detected: checksum=0x%x id=%c " 4274 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
4283 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); 4275 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4284 ql_log(ql_log_warn, vha, 0x006c, 4276 ql_log(ql_log_warn, vha, 0x006c,
4285 "Falling back to functioning (yet invalid -- WWPN) " 4277 "Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4330 rval = 1; 4322 rval = 1;
4331 } 4323 }
4332 4324
4325 if (!qla_ini_mode_enabled(vha)) {
4326 /* Don't enable full login after initial LIP */
4327 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
4328 /* Don't enable LIP full login for initiator */
4329 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
4330 }
4331
4332 qlt_24xx_config_nvram_stage1(vha, nv);
4333
4333 /* Reset Initialization control block */ 4334 /* Reset Initialization control block */
4334 memset(icb, 0, ha->init_cb_size); 4335 memset(icb, 0, ha->init_cb_size);
4335 4336
@@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4357 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 4358 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4358 "QLA2462"); 4359 "QLA2462");
4359 4360
4360 /* Use alternate WWN? */ 4361 qlt_24xx_config_nvram_stage2(vha, icb);
4362
4361 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 4363 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4364 /* Use alternate WWN? */
4362 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 4365 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4363 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 4366 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4364 } 4367 }
@@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5029 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5032 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5030 /* Reset NVRAM data. */ 5033 /* Reset NVRAM data. */
5031 ql_log(ql_log_info, vha, 0x0073, 5034 ql_log(ql_log_info, vha, 0x0073,
5032 "Inconisistent NVRAM detected: checksum=0x%x id=%c " 5035 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
5033 "version=0x%x.\n", chksum, nv->id[0], 5036 "version=0x%x.\n", chksum, nv->id[0],
5034 le16_to_cpu(nv->nvram_version)); 5037 le16_to_cpu(nv->nvram_version));
5035 ql_log(ql_log_info, vha, 0x0074, 5038 ql_log(ql_log_info, vha, 0x0074,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index eac950924497..70dbf53d9e0f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/blkdev.h> 10#include <linux/blkdev.h>
10#include <linux/delay.h> 11#include <linux/delay.h>
@@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
23{ 24{
24 uint16_t cflags; 25 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 26 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27 struct scsi_qla_host *vha = sp->fcport->vha;
26 28
27 cflags = 0; 29 cflags = 0;
28 30
29 /* Set transfer direction */ 31 /* Set transfer direction */
30 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 32 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
31 cflags = CF_WRITE; 33 cflags = CF_WRITE;
32 sp->fcport->vha->hw->qla_stats.output_bytes += 34 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
33 scsi_bufflen(cmd);
34 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
35 cflags = CF_READ; 36 cflags = CF_READ;
36 sp->fcport->vha->hw->qla_stats.input_bytes += 37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
37 scsi_bufflen(cmd);
38 } 38 }
39 return (cflags); 39 return (cflags);
40} 40}
@@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
385 else 385 else
386 req->cnt = req->length - 386 req->cnt = req->length -
387 (req->ring_index - cnt); 387 (req->ring_index - cnt);
388 /* If still no head room then bail out */
389 if (req->cnt < (req_cnt + 2))
390 goto queuing_error;
388 } 391 }
389 if (req->cnt < (req_cnt + 2))
390 goto queuing_error;
391 392
392 /* Build command packet */ 393 /* Build command packet */
393 req->current_outstanding_cmd = handle; 394 req->current_outstanding_cmd = handle;
@@ -470,7 +471,7 @@ queuing_error:
470/** 471/**
471 * qla2x00_start_iocbs() - Execute the IOCB command 472 * qla2x00_start_iocbs() - Execute the IOCB command
472 */ 473 */
473static void 474void
474qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) 475qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
475{ 476{
476 struct qla_hw_data *ha = vha->hw; 477 struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
571 return (ret); 572 return (ret);
572} 573}
573 574
575/*
576 * qla2x00_issue_marker
577 *
578 * Issue marker
579 * Caller CAN have hardware lock held as specified by ha_locked parameter.
580 * Might release it, then reaquire.
581 */
582int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
583{
584 if (ha_locked) {
585 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
586 MK_SYNC_ALL) != QLA_SUCCESS)
587 return QLA_FUNCTION_FAILED;
588 } else {
589 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
590 MK_SYNC_ALL) != QLA_SUCCESS)
591 return QLA_FUNCTION_FAILED;
592 }
593 vha->marker_needed = 0;
594
595 return QLA_SUCCESS;
596}
597
574/** 598/**
575 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and 599 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
576 * Continuation Type 1 IOCBs to allocate. 600 * Continuation Type 1 IOCBs to allocate.
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
629 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 653 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
630 cmd_pkt->control_flags = 654 cmd_pkt->control_flags =
631 __constant_cpu_to_le16(CF_WRITE_DATA); 655 __constant_cpu_to_le16(CF_WRITE_DATA);
632 ha->qla_stats.output_bytes += scsi_bufflen(cmd); 656 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
633 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 657 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
634 cmd_pkt->control_flags = 658 cmd_pkt->control_flags =
635 __constant_cpu_to_le16(CF_READ_DATA); 659 __constant_cpu_to_le16(CF_READ_DATA);
636 ha->qla_stats.input_bytes += scsi_bufflen(cmd); 660 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
637 } 661 }
638 662
639 cur_seg = scsi_sglist(cmd); 663 cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
745 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 769 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
746 cmd_pkt->task_mgmt_flags = 770 cmd_pkt->task_mgmt_flags =
747 __constant_cpu_to_le16(TMF_WRITE_DATA); 771 __constant_cpu_to_le16(TMF_WRITE_DATA);
748 sp->fcport->vha->hw->qla_stats.output_bytes += 772 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
749 scsi_bufflen(cmd);
750 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 773 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
751 cmd_pkt->task_mgmt_flags = 774 cmd_pkt->task_mgmt_flags =
752 __constant_cpu_to_le16(TMF_READ_DATA); 775 __constant_cpu_to_le16(TMF_READ_DATA);
753 sp->fcport->vha->hw->qla_stats.input_bytes += 776 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
754 scsi_bufflen(cmd);
755 } 777 }
756 778
757 /* One DSD is available in the Command Type 3 IOCB */ 779 /* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1245 return QLA_SUCCESS; 1267 return QLA_SUCCESS;
1246 } 1268 }
1247 1269
1248 cmd_pkt->vp_index = sp->fcport->vp_idx; 1270 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1249 1271
1250 /* Set transfer direction */ 1272 /* Set transfer direction */
1251 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 1273 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
1502 else 1524 else
1503 req->cnt = req->length - 1525 req->cnt = req->length -
1504 (req->ring_index - cnt); 1526 (req->ring_index - cnt);
1527 if (req->cnt < (req_cnt + 2))
1528 goto queuing_error;
1505 } 1529 }
1506 if (req->cnt < (req_cnt + 2))
1507 goto queuing_error;
1508 1530
1509 /* Build command packet. */ 1531 /* Build command packet. */
1510 req->current_outstanding_cmd = handle; 1532 req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
1527 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1549 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1528 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1550 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1529 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1551 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1530 cmd_pkt->vp_index = sp->fcport->vp_idx; 1552 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1531 1553
1532 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1554 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1533 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1555 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
1717 else 1739 else
1718 req->cnt = req->length - 1740 req->cnt = req->length -
1719 (req->ring_index - cnt); 1741 (req->ring_index - cnt);
1742 if (req->cnt < (req_cnt + 2))
1743 goto queuing_error;
1720 } 1744 }
1721 1745
1722 if (req->cnt < (req_cnt + 2))
1723 goto queuing_error;
1724
1725 status |= QDSS_GOT_Q_SPACE; 1746 status |= QDSS_GOT_Q_SPACE;
1726 1747
1727 /* Build header part of command packet (excluding the OPCODE). */ 1748 /* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1898 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1919 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1899 logio->port_id[1] = sp->fcport->d_id.b.area; 1920 logio->port_id[1] = sp->fcport->d_id.b.area;
1900 logio->port_id[2] = sp->fcport->d_id.b.domain; 1921 logio->port_id[2] = sp->fcport->d_id.b.domain;
1901 logio->vp_index = sp->fcport->vp_idx; 1922 logio->vp_index = sp->fcport->vha->vp_idx;
1902} 1923}
1903 1924
1904static void 1925static void
@@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1922 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1943 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1923 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1944 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1924 sp->fcport->d_id.b.al_pa); 1945 sp->fcport->d_id.b.al_pa);
1925 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 1946 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1926} 1947}
1927 1948
1928static void 1949static void
@@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1935 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1956 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1936 logio->port_id[1] = sp->fcport->d_id.b.area; 1957 logio->port_id[1] = sp->fcport->d_id.b.area;
1937 logio->port_id[2] = sp->fcport->d_id.b.domain; 1958 logio->port_id[2] = sp->fcport->d_id.b.domain;
1938 logio->vp_index = sp->fcport->vp_idx; 1959 logio->vp_index = sp->fcport->vha->vp_idx;
1939} 1960}
1940 1961
1941static void 1962static void
@@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1952 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1973 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1953 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1974 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1954 sp->fcport->d_id.b.al_pa); 1975 sp->fcport->d_id.b.al_pa);
1955 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 1976 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1956 /* Implicit: mbx->mbx10 = 0. */ 1977 /* Implicit: mbx->mbx10 = 0. */
1957} 1978}
1958 1979
@@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1962 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1983 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1963 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 1984 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1964 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1985 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1965 logio->vp_index = sp->fcport->vp_idx; 1986 logio->vp_index = sp->fcport->vha->vp_idx;
1966} 1987}
1967 1988
1968static void 1989static void
@@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1983 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2004 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1984 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2005 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1985 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2006 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1986 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 2007 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1987} 2008}
1988 2009
1989static void 2010static void
@@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2009 tsk->port_id[0] = fcport->d_id.b.al_pa; 2030 tsk->port_id[0] = fcport->d_id.b.al_pa;
2010 tsk->port_id[1] = fcport->d_id.b.area; 2031 tsk->port_id[1] = fcport->d_id.b.area;
2011 tsk->port_id[2] = fcport->d_id.b.domain; 2032 tsk->port_id[2] = fcport->d_id.b.domain;
2012 tsk->vp_index = fcport->vp_idx; 2033 tsk->vp_index = fcport->vha->vp_idx;
2013 2034
2014 if (flags == TCF_LUN_RESET) { 2035 if (flags == TCF_LUN_RESET) {
2015 int_to_scsilun(lun, &tsk->lun); 2036 int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2030 els_iocb->handle = sp->handle; 2051 els_iocb->handle = sp->handle;
2031 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2052 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2032 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2053 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2033 els_iocb->vp_index = sp->fcport->vp_idx; 2054 els_iocb->vp_index = sp->fcport->vha->vp_idx;
2034 els_iocb->sof_type = EST_SOFI3; 2055 els_iocb->sof_type = EST_SOFI3;
2035 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2056 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2036 2057
@@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2160 ct_iocb->handle = sp->handle; 2181 ct_iocb->handle = sp->handle;
2161 2182
2162 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2183 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2163 ct_iocb->vp_index = sp->fcport->vp_idx; 2184 ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2164 ct_iocb->comp_status = __constant_cpu_to_le16(0); 2185 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2165 2186
2166 ct_iocb->cmd_dsd_count = 2187 ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@ sufficient_dsds:
2343 else 2364 else
2344 req->cnt = req->length - 2365 req->cnt = req->length -
2345 (req->ring_index - cnt); 2366 (req->ring_index - cnt);
2367 if (req->cnt < (req_cnt + 2))
2368 goto queuing_error;
2346 } 2369 }
2347 2370
2348 if (req->cnt < (req_cnt + 2))
2349 goto queuing_error;
2350
2351 ctx = sp->u.scmd.ctx = 2371 ctx = sp->u.scmd.ctx =
2352 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2372 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2353 if (!ctx) { 2373 if (!ctx) {
@@ -2362,7 +2382,7 @@ sufficient_dsds:
2362 if (!ctx->fcp_cmnd) { 2382 if (!ctx->fcp_cmnd) {
2363 ql_log(ql_log_fatal, vha, 0x3011, 2383 ql_log(ql_log_fatal, vha, 0x3011,
2364 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); 2384 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2365 goto queuing_error_fcp_cmnd; 2385 goto queuing_error;
2366 } 2386 }
2367 2387
2368 /* Initialize the DSD list and dma handle */ 2388 /* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@ sufficient_dsds:
2400 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2420 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2401 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2421 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2402 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2422 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2403 cmd_pkt->vp_index = sp->fcport->vp_idx; 2423 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2404 2424
2405 /* Build IOCB segments */ 2425 /* Build IOCB segments */
2406 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2426 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@ sufficient_dsds:
2489 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2509 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2490 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2510 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2491 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2511 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2492 cmd_pkt->vp_index = sp->fcport->vp_idx; 2512 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2493 2513
2494 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2514 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2495 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2515 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ce42288049b5..6f67a9d4998b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
@@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
309 "IDC failed to post ACK.\n"); 310 "IDC failed to post ACK.\n");
310} 311}
311 312
313#define LS_UNKNOWN 2
314char *
315qla2x00_get_link_speed_str(struct qla_hw_data *ha)
316{
317 static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
318 char *link_speed;
319 int fw_speed = ha->link_data_rate;
320
321 if (IS_QLA2100(ha) || IS_QLA2200(ha))
322 link_speed = link_speeds[0];
323 else if (fw_speed == 0x13)
324 link_speed = link_speeds[6];
325 else {
326 link_speed = link_speeds[LS_UNKNOWN];
327 if (fw_speed < 6)
328 link_speed =
329 link_speeds[fw_speed];
330 }
331
332 return link_speed;
333}
334
312/** 335/**
313 * qla2x00_async_event() - Process aynchronous events. 336 * qla2x00_async_event() - Process aynchronous events.
314 * @ha: SCSI driver HA context 337 * @ha: SCSI driver HA context
@@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
317void 340void
318qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 341qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
319{ 342{
320#define LS_UNKNOWN 2
321 static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
322 char *link_speed;
323 uint16_t handle_cnt; 343 uint16_t handle_cnt;
324 uint16_t cnt, mbx; 344 uint16_t cnt, mbx;
325 uint32_t handles[5]; 345 uint32_t handles[5];
@@ -454,8 +474,8 @@ skip_rio:
454 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 474 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
455 ql_dbg(ql_dbg_async, vha, 0x5008, 475 ql_dbg(ql_dbg_async, vha, 0x5008,
456 "Asynchronous WAKEUP_THRES.\n"); 476 "Asynchronous WAKEUP_THRES.\n");
457 break;
458 477
478 break;
459 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 479 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
460 ql_dbg(ql_dbg_async, vha, 0x5009, 480 ql_dbg(ql_dbg_async, vha, 0x5009,
461 "LIP occurred (%x).\n", mb[1]); 481 "LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@ skip_rio:
479 break; 499 break;
480 500
481 case MBA_LOOP_UP: /* Loop Up Event */ 501 case MBA_LOOP_UP: /* Loop Up Event */
482 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 502 if (IS_QLA2100(ha) || IS_QLA2200(ha))
483 link_speed = link_speeds[0];
484 ha->link_data_rate = PORT_SPEED_1GB; 503 ha->link_data_rate = PORT_SPEED_1GB;
485 } else { 504 else
486 link_speed = link_speeds[LS_UNKNOWN];
487 if (mb[1] < 6)
488 link_speed = link_speeds[mb[1]];
489 else if (mb[1] == 0x13)
490 link_speed = link_speeds[6];
491 ha->link_data_rate = mb[1]; 505 ha->link_data_rate = mb[1];
492 }
493 506
494 ql_dbg(ql_dbg_async, vha, 0x500a, 507 ql_dbg(ql_dbg_async, vha, 0x500a,
495 "LOOP UP detected (%s Gbps).\n", link_speed); 508 "LOOP UP detected (%s Gbps).\n",
509 qla2x00_get_link_speed_str(ha));
496 510
497 vha->flags.management_server_logged_in = 0; 511 vha->flags.management_server_logged_in = 0;
498 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 512 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -638,6 +652,8 @@ skip_rio:
638 ql_dbg(ql_dbg_async, vha, 0x5010, 652 ql_dbg(ql_dbg_async, vha, 0x5010,
639 "Port unavailable %04x %04x %04x.\n", 653 "Port unavailable %04x %04x %04x.\n",
640 mb[1], mb[2], mb[3]); 654 mb[1], mb[2], mb[3]);
655 ql_log(ql_log_warn, vha, 0x505e,
656 "Link is offline.\n");
641 657
642 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 658 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
643 atomic_set(&vha->loop_state, LOOP_DOWN); 659 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@ skip_rio:
670 ql_dbg(ql_dbg_async, vha, 0x5011, 686 ql_dbg(ql_dbg_async, vha, 0x5011,
671 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 687 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
672 mb[1], mb[2], mb[3]); 688 mb[1], mb[2], mb[3]);
689
690 qlt_async_event(mb[0], vha, mb);
673 break; 691 break;
674 } 692 }
675 693
676 ql_dbg(ql_dbg_async, vha, 0x5012, 694 ql_dbg(ql_dbg_async, vha, 0x5012,
677 "Port database changed %04x %04x %04x.\n", 695 "Port database changed %04x %04x %04x.\n",
678 mb[1], mb[2], mb[3]); 696 mb[1], mb[2], mb[3]);
697 ql_log(ql_log_warn, vha, 0x505f,
698 "Link is operational (%s Gbps).\n",
699 qla2x00_get_link_speed_str(ha));
679 700
680 /* 701 /*
681 * Mark all devices as missing so we will login again. 702 * Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@ skip_rio:
684 705
685 qla2x00_mark_all_devices_lost(vha, 1); 706 qla2x00_mark_all_devices_lost(vha, 1);
686 707
708 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
709 set_bit(SCR_PENDING, &vha->dpc_flags);
710
687 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 711 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
688 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 712 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
713
714 qlt_async_event(mb[0], vha, mb);
689 break; 715 break;
690 716
691 case MBA_RSCN_UPDATE: /* State Change Registration */ 717 case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -807,6 +833,8 @@ skip_rio:
807 mb[0], mb[1], mb[2], mb[3]); 833 mb[0], mb[1], mb[2], mb[3]);
808 } 834 }
809 835
836 qlt_async_event(mb[0], vha, mb);
837
810 if (!vha->vp_idx && ha->num_vhosts) 838 if (!vha->vp_idx && ha->num_vhosts)
811 qla2x00_alert_all_vps(rsp, mb); 839 qla2x00_alert_all_vps(rsp, mb);
812} 840}
@@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1172 } else if (iop[0] & BIT_5) 1200 } else if (iop[0] & BIT_5)
1173 fcport->port_type = FCT_INITIATOR; 1201 fcport->port_type = FCT_INITIATOR;
1174 1202
1203 if (iop[0] & BIT_7)
1204 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1205
1175 if (logio->io_parameter[7] || logio->io_parameter[8]) 1206 if (logio->io_parameter[7] || logio->io_parameter[8])
1176 fcport->supported_classes |= FC_COS_CLASS2; 1207 fcport->supported_classes |= FC_COS_CLASS2;
1177 if (logio->io_parameter[9] || logio->io_parameter[10]) 1208 if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1986 2017
1987 if (pkt->entry_status != 0) { 2018 if (pkt->entry_status != 0) {
1988 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2019 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2020
2021 (void)qlt_24xx_process_response_error(vha, pkt);
2022
1989 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2023 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1990 wmb(); 2024 wmb();
1991 continue; 2025 continue;
@@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2016 case ELS_IOCB_TYPE: 2050 case ELS_IOCB_TYPE:
2017 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2051 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2018 break; 2052 break;
2053 case ABTS_RECV_24XX:
2054 /* ensure that the ATIO queue is empty */
2055 qlt_24xx_process_atio_queue(vha);
2056 case ABTS_RESP_24XX:
2057 case CTIO_TYPE7:
2058 case NOTIFY_ACK_TYPE:
2059 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2060 break;
2019 case MARKER_TYPE: 2061 case MARKER_TYPE:
2020 /* Do nothing in this case, this check is to prevent it 2062 /* Do nothing in this case, this check is to prevent it
2021 * from falling into default case 2063 * from falling into default case
@@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
2168 case 0x14: 2210 case 0x14:
2169 qla24xx_process_response_queue(vha, rsp); 2211 qla24xx_process_response_queue(vha, rsp);
2170 break; 2212 break;
2213 case 0x1C: /* ATIO queue updated */
2214 qlt_24xx_process_atio_queue(vha);
2215 break;
2216 case 0x1D: /* ATIO and response queues updated */
2217 qlt_24xx_process_atio_queue(vha);
2218 qla24xx_process_response_queue(vha, rsp);
2219 break;
2171 default: 2220 default:
2172 ql_dbg(ql_dbg_async, vha, 0x504f, 2221 ql_dbg(ql_dbg_async, vha, 0x504f,
2173 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2222 "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
2312 case 0x14: 2361 case 0x14:
2313 qla24xx_process_response_queue(vha, rsp); 2362 qla24xx_process_response_queue(vha, rsp);
2314 break; 2363 break;
2364 case 0x1C: /* ATIO queue updated */
2365 qlt_24xx_process_atio_queue(vha);
2366 break;
2367 case 0x1D: /* ATIO and response queues updated */
2368 qlt_24xx_process_atio_queue(vha);
2369 qla24xx_process_response_queue(vha, rsp);
2370 break;
2315 default: 2371 default:
2316 ql_dbg(ql_dbg_async, vha, 0x5051, 2372 ql_dbg(ql_dbg_async, vha, 0x5051,
2317 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2373 "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@ void
2564qla2x00_free_irqs(scsi_qla_host_t *vha) 2620qla2x00_free_irqs(scsi_qla_host_t *vha)
2565{ 2621{
2566 struct qla_hw_data *ha = vha->hw; 2622 struct qla_hw_data *ha = vha->hw;
2567 struct rsp_que *rsp = ha->rsp_q_map[0]; 2623 struct rsp_que *rsp;
2624
2625 /*
2626 * We need to check that ha->rsp_q_map is valid in case we are called
2627 * from a probe failure context.
2628 */
2629 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
2630 return;
2631 rsp = ha->rsp_q_map[0];
2568 2632
2569 if (ha->flags.msix_enabled) 2633 if (ha->flags.msix_enabled)
2570 qla24xx_disable_msix(ha); 2634 qla24xx_disable_msix(ha);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b4a23394a7bd..d5ce92c0a8fc 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/gfp.h> 11#include <linux/gfp.h>
@@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
270 ictrl = RD_REG_WORD(&reg->isp.ictrl); 271 ictrl = RD_REG_WORD(&reg->isp.ictrl);
271 } 272 }
272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, 273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
273 "MBX Command timeout for cmd %x.\n", command); 274 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a, 275 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
275 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
276 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
277 "mb[0] = 0x%x.\n", mb0);
278 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
279 277
280 /* 278 /*
@@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
320 CRB_NIU_XG_PAUSE_CTL_P1); 318 CRB_NIU_XG_PAUSE_CTL_P1);
321 } 319 }
322 ql_log(ql_log_info, base_vha, 0x101c, 320 ql_log(ql_log_info, base_vha, 0x101c,
323 "Mailbox cmd timeout occured, cmd=0x%x, " 321 "Mailbox cmd timeout occurred, cmd=0x%x, "
324 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " 322 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
325 "abort.\n", command, mcp->mb[0], 323 "abort.\n", command, mcp->mb[0],
326 ha->flags.eeh_busy); 324 ha->flags.eeh_busy);
@@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
345 CRB_NIU_XG_PAUSE_CTL_P1); 343 CRB_NIU_XG_PAUSE_CTL_P1);
346 } 344 }
347 ql_log(ql_log_info, base_vha, 0x101e, 345 ql_log(ql_log_info, base_vha, 0x101e,
348 "Mailbox cmd timeout occured, cmd=0x%x, " 346 "Mailbox cmd timeout occurred, cmd=0x%x, "
349 "mb[0]=0x%x. Scheduling ISP abort ", 347 "mb[0]=0x%x. Scheduling ISP abort ",
350 command, mcp->mb[0]); 348 command, mcp->mb[0]);
351 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 349 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
390 mbx_cmd_t mc; 388 mbx_cmd_t mc;
391 mbx_cmd_t *mcp = &mc; 389 mbx_cmd_t *mcp = &mc;
392 390
393 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__); 391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
392 "Entered %s.\n", __func__);
394 393
395 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 394 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
396 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 395 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
424 ql_dbg(ql_dbg_mbx, vha, 0x1023, 423 ql_dbg(ql_dbg_mbx, vha, 0x1023,
425 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 424 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
426 } else { 425 } else {
427 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__); 426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
427 "Done %s.\n", __func__);
428 } 428 }
429 429
430 return rval; 430 return rval;
@@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
454 mbx_cmd_t mc; 454 mbx_cmd_t mc;
455 mbx_cmd_t *mcp = &mc; 455 mbx_cmd_t *mcp = &mc;
456 456
457 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__); 457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
458 "Entered %s.\n", __func__);
458 459
459 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 460 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
460 mcp->out_mb = MBX_0; 461 mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
489 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 490 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
490 } else { 491 } else {
491 if (IS_FWI2_CAPABLE(ha)) { 492 if (IS_FWI2_CAPABLE(ha)) {
492 ql_dbg(ql_dbg_mbx, vha, 0x1027, 493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
493 "Done exchanges=%x.\n", mcp->mb[1]); 494 "Done exchanges=%x.\n", mcp->mb[1]);
494 } else { 495 } else {
495 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__); 496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
497 "Done %s.\n", __func__);
496 } 498 }
497 } 499 }
498 500
@@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
523 mbx_cmd_t *mcp = &mc; 525 mbx_cmd_t *mcp = &mc;
524 struct qla_hw_data *ha = vha->hw; 526 struct qla_hw_data *ha = vha->hw;
525 527
526 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__); 528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
529 "Entered %s.\n", __func__);
527 530
528 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 531 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
529 mcp->out_mb = MBX_0; 532 mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
561 ha->fw_attributes_h = mcp->mb[15]; 564 ha->fw_attributes_h = mcp->mb[15];
562 ha->fw_attributes_ext[0] = mcp->mb[16]; 565 ha->fw_attributes_ext[0] = mcp->mb[16];
563 ha->fw_attributes_ext[1] = mcp->mb[17]; 566 ha->fw_attributes_ext[1] = mcp->mb[17];
564 ql_dbg(ql_dbg_mbx, vha, 0x1139, 567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
565 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", 568 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
566 __func__, mcp->mb[15], mcp->mb[6]); 569 __func__, mcp->mb[15], mcp->mb[6]);
567 } else 570 } else
568 ql_dbg(ql_dbg_mbx, vha, 0x112f, 571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
569 "%s: FwAttributes [Upper] invalid, MB6:%04x\n", 572 "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
570 __func__, mcp->mb[6]); 573 __func__, mcp->mb[6]);
571 } 574 }
@@ -576,7 +579,8 @@ failed:
576 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); 579 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
577 } else { 580 } else {
578 /*EMPTY*/ 581 /*EMPTY*/
579 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__); 582 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
583 "Done %s.\n", __func__);
580 } 584 }
581 return rval; 585 return rval;
582} 586}
@@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
602 mbx_cmd_t mc; 606 mbx_cmd_t mc;
603 mbx_cmd_t *mcp = &mc; 607 mbx_cmd_t *mcp = &mc;
604 608
605 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__); 609 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
610 "Entered %s.\n", __func__);
606 611
607 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 612 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
608 mcp->out_mb = MBX_0; 613 mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
620 fwopts[2] = mcp->mb[2]; 625 fwopts[2] = mcp->mb[2];
621 fwopts[3] = mcp->mb[3]; 626 fwopts[3] = mcp->mb[3];
622 627
623 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__); 628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
629 "Done %s.\n", __func__);
624 } 630 }
625 631
626 return rval; 632 return rval;
@@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
648 mbx_cmd_t mc; 654 mbx_cmd_t mc;
649 mbx_cmd_t *mcp = &mc; 655 mbx_cmd_t *mcp = &mc;
650 656
651 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__); 657 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
658 "Entered %s.\n", __func__);
652 659
653 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 660 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
654 mcp->mb[1] = fwopts[1]; 661 mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
676 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); 683 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
677 } else { 684 } else {
678 /*EMPTY*/ 685 /*EMPTY*/
679 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__); 686 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
687 "Done %s.\n", __func__);
680 } 688 }
681 689
682 return rval; 690 return rval;
@@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
704 mbx_cmd_t mc; 712 mbx_cmd_t mc;
705 mbx_cmd_t *mcp = &mc; 713 mbx_cmd_t *mcp = &mc;
706 714
707 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__); 715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
716 "Entered %s.\n", __func__);
708 717
709 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 718 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
710 mcp->mb[1] = 0xAAAA; 719 mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
734 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); 743 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
735 } else { 744 } else {
736 /*EMPTY*/ 745 /*EMPTY*/
737 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__); 746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
747 "Done %s.\n", __func__);
738 } 748 }
739 749
740 return rval; 750 return rval;
@@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
762 mbx_cmd_t mc; 772 mbx_cmd_t mc;
763 mbx_cmd_t *mcp = &mc; 773 mbx_cmd_t *mcp = &mc;
764 774
765 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__); 775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
776 "Entered %s.\n", __func__);
766 777
767 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 778 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
768 mcp->out_mb = MBX_0; 779 mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
787 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? 798 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
788 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); 799 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
789 } else { 800 } else {
790 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__); 801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
802 "Done %s.\n", __func__);
791 } 803 }
792 804
793 return rval; 805 return rval;
@@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
819 mbx_cmd_t mc; 831 mbx_cmd_t mc;
820 mbx_cmd_t *mcp = &mc; 832 mbx_cmd_t *mcp = &mc;
821 833
822 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__); 834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
835 "Entered %s.\n", __func__);
823 836
824 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 837 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
825 mcp->mb[1] = 0; 838 mcp->mb[1] = 0;
@@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
842 /* Mask reserved bits. */ 855 /* Mask reserved bits. */
843 sts_entry->entry_status &= 856 sts_entry->entry_status &=
844 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 857 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
845 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__); 858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
859 "Done %s.\n", __func__);
846 } 860 }
847 861
848 return rval; 862 return rval;
@@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp)
884 struct req_que *req = vha->req; 898 struct req_que *req = vha->req;
885 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 899 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
886 900
887 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__); 901 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
902 "Entered %s.\n", __func__);
888 903
889 spin_lock_irqsave(&ha->hardware_lock, flags); 904 spin_lock_irqsave(&ha->hardware_lock, flags);
890 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 905 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp)
915 if (rval != QLA_SUCCESS) { 930 if (rval != QLA_SUCCESS) {
916 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); 931 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
917 } else { 932 } else {
918 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__); 933 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
934 "Done %s.\n", __func__);
919 } 935 }
920 936
921 return rval; 937 return rval;
@@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
934 l = l; 950 l = l;
935 vha = fcport->vha; 951 vha = fcport->vha;
936 952
937 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__); 953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
954 "Entered %s.\n", __func__);
938 955
939 req = vha->hw->req_q_map[0]; 956 req = vha->hw->req_q_map[0];
940 rsp = req->rsp; 957 rsp = req->rsp;
@@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
955 mcp->flags = 0; 972 mcp->flags = 0;
956 rval = qla2x00_mailbox_command(vha, mcp); 973 rval = qla2x00_mailbox_command(vha, mcp);
957 if (rval != QLA_SUCCESS) { 974 if (rval != QLA_SUCCESS) {
958 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval); 975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
976 "Failed=%x.\n", rval);
959 } 977 }
960 978
961 /* Issue marker IOCB. */ 979 /* Issue marker IOCB. */
@@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
965 ql_dbg(ql_dbg_mbx, vha, 0x1040, 983 ql_dbg(ql_dbg_mbx, vha, 0x1040,
966 "Failed to issue marker IOCB (%x).\n", rval2); 984 "Failed to issue marker IOCB (%x).\n", rval2);
967 } else { 985 } else {
968 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__); 986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
987 "Done %s.\n", __func__);
969 } 988 }
970 989
971 return rval; 990 return rval;
@@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
983 1002
984 vha = fcport->vha; 1003 vha = fcport->vha;
985 1004
986 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__); 1005 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1006 "Entered %s.\n", __func__);
987 1007
988 req = vha->hw->req_q_map[0]; 1008 req = vha->hw->req_q_map[0];
989 rsp = req->rsp; 1009 rsp = req->rsp;
@@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
1012 ql_dbg(ql_dbg_mbx, vha, 0x1044, 1032 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1013 "Failed to issue marker IOCB (%x).\n", rval2); 1033 "Failed to issue marker IOCB (%x).\n", rval2);
1014 } else { 1034 } else {
1015 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__); 1035 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1036 "Done %s.\n", __func__);
1016 } 1037 }
1017 1038
1018 return rval; 1039 return rval;
@@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1046 mbx_cmd_t mc; 1067 mbx_cmd_t mc;
1047 mbx_cmd_t *mcp = &mc; 1068 mbx_cmd_t *mcp = &mc;
1048 1069
1049 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__); 1070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1071 "Entered %s.\n", __func__);
1050 1072
1051 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1073 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1052 mcp->mb[9] = vha->vp_idx; 1074 mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1074 /*EMPTY*/ 1096 /*EMPTY*/
1075 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); 1097 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1076 } else { 1098 } else {
1077 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__); 1099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1100 "Done %s.\n", __func__);
1078 1101
1079 if (IS_CNA_CAPABLE(vha->hw)) { 1102 if (IS_CNA_CAPABLE(vha->hw)) {
1080 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1103 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1115 mbx_cmd_t mc; 1138 mbx_cmd_t mc;
1116 mbx_cmd_t *mcp = &mc; 1139 mbx_cmd_t *mcp = &mc;
1117 1140
1118 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__); 1141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1142 "Entered %s.\n", __func__);
1119 1143
1120 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1144 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1121 mcp->out_mb = MBX_0; 1145 mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1138 *tov = ratov; 1162 *tov = ratov;
1139 } 1163 }
1140 1164
1141 ql_dbg(ql_dbg_mbx, vha, 0x104b, 1165 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1142 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); 1166 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1143 } 1167 }
1144 1168
@@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1170 mbx_cmd_t *mcp = &mc; 1194 mbx_cmd_t *mcp = &mc;
1171 struct qla_hw_data *ha = vha->hw; 1195 struct qla_hw_data *ha = vha->hw;
1172 1196
1173 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__); 1197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1198 "Entered %s.\n", __func__);
1174 1199
1175 if (IS_QLA82XX(ha) && ql2xdbwr) 1200 if (IS_QLA82XX(ha) && ql2xdbwr)
1176 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1201 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1213 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); 1238 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1214 } else { 1239 } else {
1215 /*EMPTY*/ 1240 /*EMPTY*/
1216 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__); 1241 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1242 "Done %s.\n", __func__);
1243 }
1244
1245 return rval;
1246}
1247
1248/*
1249 * qla2x00_get_node_name_list
1250 * Issue get node name list mailbox command, kmalloc()
1251 * and return the resulting list. Caller must kfree() it!
1252 *
1253 * Input:
1254 * ha = adapter state pointer.
1255 * out_data = resulting list
1256 * out_len = length of the resulting list
1257 *
1258 * Returns:
1259 * qla2x00 local function return status code.
1260 *
1261 * Context:
1262 * Kernel context.
1263 */
1264int
1265qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
1266{
1267 struct qla_hw_data *ha = vha->hw;
1268 struct qla_port_24xx_data *list = NULL;
1269 void *pmap;
1270 mbx_cmd_t mc;
1271 dma_addr_t pmap_dma;
1272 ulong dma_size;
1273 int rval, left;
1274
1275 left = 1;
1276 while (left > 0) {
1277 dma_size = left * sizeof(*list);
1278 pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
1279 &pmap_dma, GFP_KERNEL);
1280 if (!pmap) {
1281 ql_log(ql_log_warn, vha, 0x113f,
1282 "%s(%ld): DMA Alloc failed of %ld\n",
1283 __func__, vha->host_no, dma_size);
1284 rval = QLA_MEMORY_ALLOC_FAILED;
1285 goto out;
1286 }
1287
1288 mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
1289 mc.mb[1] = BIT_1 | BIT_3;
1290 mc.mb[2] = MSW(pmap_dma);
1291 mc.mb[3] = LSW(pmap_dma);
1292 mc.mb[6] = MSW(MSD(pmap_dma));
1293 mc.mb[7] = LSW(MSD(pmap_dma));
1294 mc.mb[8] = dma_size;
1295 mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
1296 mc.in_mb = MBX_0|MBX_1;
1297 mc.tov = 30;
1298 mc.flags = MBX_DMA_IN;
1299
1300 rval = qla2x00_mailbox_command(vha, &mc);
1301 if (rval != QLA_SUCCESS) {
1302 if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
1303 (mc.mb[1] == 0xA)) {
1304 left += le16_to_cpu(mc.mb[2]) /
1305 sizeof(struct qla_port_24xx_data);
1306 goto restart;
1307 }
1308 goto out_free;
1309 }
1310
1311 left = 0;
1312
1313 list = kzalloc(dma_size, GFP_KERNEL);
1314 if (!list) {
1315 ql_log(ql_log_warn, vha, 0x1140,
1316 "%s(%ld): failed to allocate node names list "
1317 "structure.\n", __func__, vha->host_no);
1318 rval = QLA_MEMORY_ALLOC_FAILED;
1319 goto out_free;
1320 }
1321
1322 memcpy(list, pmap, dma_size);
1323restart:
1324 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1217 } 1325 }
1218 1326
1327 *out_data = list;
1328 *out_len = dma_size;
1329
1330out:
1331 return rval;
1332
1333out_free:
1334 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1219 return rval; 1335 return rval;
1220} 1336}
1221 1337
@@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1246 dma_addr_t pd_dma; 1362 dma_addr_t pd_dma;
1247 struct qla_hw_data *ha = vha->hw; 1363 struct qla_hw_data *ha = vha->hw;
1248 1364
1249 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__); 1365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1366 "Entered %s.\n", __func__);
1250 1367
1251 pd24 = NULL; 1368 pd24 = NULL;
1252 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1369 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1326 fcport->port_type = FCT_INITIATOR; 1443 fcport->port_type = FCT_INITIATOR;
1327 else 1444 else
1328 fcport->port_type = FCT_TARGET; 1445 fcport->port_type = FCT_TARGET;
1446
1447 /* Passback COS information. */
1448 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1449 FC_COS_CLASS2 : FC_COS_CLASS3;
1450
1451 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1452 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1329 } else { 1453 } else {
1330 uint64_t zero = 0; 1454 uint64_t zero = 0;
1331 1455
@@ -1378,7 +1502,8 @@ gpd_error_out:
1378 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 1502 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1379 mcp->mb[0], mcp->mb[1]); 1503 mcp->mb[0], mcp->mb[1]);
1380 } else { 1504 } else {
1381 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__); 1505 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1506 "Done %s.\n", __func__);
1382 } 1507 }
1383 1508
1384 return rval; 1509 return rval;
@@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1407 mbx_cmd_t mc; 1532 mbx_cmd_t mc;
1408 mbx_cmd_t *mcp = &mc; 1533 mbx_cmd_t *mcp = &mc;
1409 1534
1410 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__); 1535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1536 "Entered %s.\n", __func__);
1411 1537
1412 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1538 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1413 mcp->out_mb = MBX_0; 1539 mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1433 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 1559 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1434 } else { 1560 } else {
1435 /*EMPTY*/ 1561 /*EMPTY*/
1436 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__); 1562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
1563 "Done %s.\n", __func__);
1437 } 1564 }
1438 1565
1439 return rval; 1566 return rval;
@@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1465 mbx_cmd_t mc; 1592 mbx_cmd_t mc;
1466 mbx_cmd_t *mcp = &mc; 1593 mbx_cmd_t *mcp = &mc;
1467 1594
1468 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__); 1595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
1596 "Entered %s.\n", __func__);
1469 1597
1470 mcp->mb[0] = MBC_GET_PORT_NAME; 1598 mcp->mb[0] = MBC_GET_PORT_NAME;
1471 mcp->mb[9] = vha->vp_idx; 1599 mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1499 name[7] = LSB(mcp->mb[7]); 1627 name[7] = LSB(mcp->mb[7]);
1500 } 1628 }
1501 1629
1502 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__); 1630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
1631 "Done %s.\n", __func__);
1503 } 1632 }
1504 1633
1505 return rval; 1634 return rval;
@@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1527 mbx_cmd_t mc; 1656 mbx_cmd_t mc;
1528 mbx_cmd_t *mcp = &mc; 1657 mbx_cmd_t *mcp = &mc;
1529 1658
1530 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__); 1659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
1660 "Entered %s.\n", __func__);
1531 1661
1532 if (IS_CNA_CAPABLE(vha->hw)) { 1662 if (IS_CNA_CAPABLE(vha->hw)) {
1533 /* Logout across all FCFs. */ 1663 /* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1564 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 1694 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1565 } else { 1695 } else {
1566 /*EMPTY*/ 1696 /*EMPTY*/
1567 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__); 1697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
1698 "Done %s.\n", __func__);
1568 } 1699 }
1569 1700
1570 return rval; 1701 return rval;
@@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1596 mbx_cmd_t mc; 1727 mbx_cmd_t mc;
1597 mbx_cmd_t *mcp = &mc; 1728 mbx_cmd_t *mcp = &mc;
1598 1729
1599 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__); 1730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
1731 "Entered %s.\n", __func__);
1600 1732
1601 ql_dbg(ql_dbg_mbx, vha, 0x105e, 1733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
1602 "Retry cnt=%d ratov=%d total tov=%d.\n", 1734 "Retry cnt=%d ratov=%d total tov=%d.\n",
1603 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 1735 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1604 1736
@@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1622 rval, mcp->mb[0], mcp->mb[1]); 1754 rval, mcp->mb[0], mcp->mb[1]);
1623 } else { 1755 } else {
1624 /*EMPTY*/ 1756 /*EMPTY*/
1625 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__); 1757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
1758 "Done %s.\n", __func__);
1626 } 1759 }
1627 1760
1628 return rval; 1761 return rval;
@@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1641 struct req_que *req; 1774 struct req_que *req;
1642 struct rsp_que *rsp; 1775 struct rsp_que *rsp;
1643 1776
1644 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__); 1777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
1778 "Entered %s.\n", __func__);
1645 1779
1646 if (ha->flags.cpu_affinity_enabled) 1780 if (ha->flags.cpu_affinity_enabled)
1647 req = ha->req_q_map[0]; 1781 req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1715 break; 1849 break;
1716 } 1850 }
1717 } else { 1851 } else {
1718 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__); 1852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
1853 "Done %s.\n", __func__);
1719 1854
1720 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1855 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1721 1856
@@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1733 mb[10] |= BIT_0; /* Class 2. */ 1868 mb[10] |= BIT_0; /* Class 2. */
1734 if (lg->io_parameter[9] || lg->io_parameter[10]) 1869 if (lg->io_parameter[9] || lg->io_parameter[10])
1735 mb[10] |= BIT_1; /* Class 3. */ 1870 mb[10] |= BIT_1; /* Class 3. */
1871 if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
1872 mb[10] |= BIT_7; /* Confirmed Completion
1873 * Allowed
1874 */
1736 } 1875 }
1737 1876
1738 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1877 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1770 mbx_cmd_t *mcp = &mc; 1909 mbx_cmd_t *mcp = &mc;
1771 struct qla_hw_data *ha = vha->hw; 1910 struct qla_hw_data *ha = vha->hw;
1772 1911
1773 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__); 1912 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
1913 "Entered %s.\n", __func__);
1774 1914
1775 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1915 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1776 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1916 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1818 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 1958 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1819 } else { 1959 } else {
1820 /*EMPTY*/ 1960 /*EMPTY*/
1821 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__); 1961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
1962 "Done %s.\n", __func__);
1822 } 1963 }
1823 1964
1824 return rval; 1965 return rval;
@@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1849 mbx_cmd_t *mcp = &mc; 1990 mbx_cmd_t *mcp = &mc;
1850 struct qla_hw_data *ha = vha->hw; 1991 struct qla_hw_data *ha = vha->hw;
1851 1992
1852 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__); 1993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
1994 "Entered %s.\n", __func__);
1853 1995
1854 if (IS_FWI2_CAPABLE(ha)) 1996 if (IS_FWI2_CAPABLE(ha))
1855 return qla24xx_login_fabric(vha, fcport->loop_id, 1997 return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1891 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2033 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1892 } else { 2034 } else {
1893 /*EMPTY*/ 2035 /*EMPTY*/
1894 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__); 2036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2037 "Done %s.\n", __func__);
1895 } 2038 }
1896 2039
1897 return (rval); 2040 return (rval);
@@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1908 struct req_que *req; 2051 struct req_que *req;
1909 struct rsp_que *rsp; 2052 struct rsp_que *rsp;
1910 2053
1911 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__); 2054 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2055 "Entered %s.\n", __func__);
1912 2056
1913 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2057 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1914 if (lg == NULL) { 2058 if (lg == NULL) {
@@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1952 le32_to_cpu(lg->io_parameter[1])); 2096 le32_to_cpu(lg->io_parameter[1]));
1953 } else { 2097 } else {
1954 /*EMPTY*/ 2098 /*EMPTY*/
1955 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__); 2099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2100 "Done %s.\n", __func__);
1956 } 2101 }
1957 2102
1958 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2103 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1984 mbx_cmd_t mc; 2129 mbx_cmd_t mc;
1985 mbx_cmd_t *mcp = &mc; 2130 mbx_cmd_t *mcp = &mc;
1986 2131
1987 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__); 2132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2133 "Entered %s.\n", __func__);
1988 2134
1989 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2135 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1990 mcp->out_mb = MBX_1|MBX_0; 2136 mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2007 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2153 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2008 } else { 2154 } else {
2009 /*EMPTY*/ 2155 /*EMPTY*/
2010 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__); 2156 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2157 "Done %s.\n", __func__);
2011 } 2158 }
2012 2159
2013 return rval; 2160 return rval;
@@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2035 mbx_cmd_t mc; 2182 mbx_cmd_t mc;
2036 mbx_cmd_t *mcp = &mc; 2183 mbx_cmd_t *mcp = &mc;
2037 2184
2038 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__); 2185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2186 "Entered %s.\n", __func__);
2039 2187
2040 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2188 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2041 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 2189 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2052 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2200 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2053 } else { 2201 } else {
2054 /*EMPTY*/ 2202 /*EMPTY*/
2055 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__); 2203 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2204 "Done %s.\n", __func__);
2056 } 2205 }
2057 2206
2058 return rval; 2207 return rval;
@@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2078 mbx_cmd_t mc; 2227 mbx_cmd_t mc;
2079 mbx_cmd_t *mcp = &mc; 2228 mbx_cmd_t *mcp = &mc;
2080 2229
2081 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__); 2230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2231 "Entered %s.\n", __func__);
2082 2232
2083 if (id_list == NULL) 2233 if (id_list == NULL)
2084 return QLA_FUNCTION_FAILED; 2234 return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2110 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2260 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2111 } else { 2261 } else {
2112 *entries = mcp->mb[1]; 2262 *entries = mcp->mb[1];
2113 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__); 2263 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2264 "Done %s.\n", __func__);
2114 } 2265 }
2115 2266
2116 return rval; 2267 return rval;
@@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2138 mbx_cmd_t mc; 2289 mbx_cmd_t mc;
2139 mbx_cmd_t *mcp = &mc; 2290 mbx_cmd_t *mcp = &mc;
2140 2291
2141 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__); 2292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2293 "Entered %s.\n", __func__);
2142 2294
2143 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2295 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2144 mcp->out_mb = MBX_0; 2296 mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2154 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2306 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2155 "Failed mb[0]=%x.\n", mcp->mb[0]); 2307 "Failed mb[0]=%x.\n", mcp->mb[0]);
2156 } else { 2308 } else {
2157 ql_dbg(ql_dbg_mbx, vha, 0x107e, 2309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2158 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2310 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2159 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2311 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2160 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2312 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2201 dma_addr_t pmap_dma; 2353 dma_addr_t pmap_dma;
2202 struct qla_hw_data *ha = vha->hw; 2354 struct qla_hw_data *ha = vha->hw;
2203 2355
2204 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__); 2356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2357 "Entered %s.\n", __func__);
2205 2358
2206 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2359 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2207 if (pmap == NULL) { 2360 if (pmap == NULL) {
@@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2224 rval = qla2x00_mailbox_command(vha, mcp); 2377 rval = qla2x00_mailbox_command(vha, mcp);
2225 2378
2226 if (rval == QLA_SUCCESS) { 2379 if (rval == QLA_SUCCESS) {
2227 ql_dbg(ql_dbg_mbx, vha, 0x1081, 2380 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2228 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 2381 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2229 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 2382 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2230 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 2383 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2238 if (rval != QLA_SUCCESS) { 2391 if (rval != QLA_SUCCESS) {
2239 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 2392 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2240 } else { 2393 } else {
2241 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__); 2394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2395 "Done %s.\n", __func__);
2242 } 2396 }
2243 2397
2244 return rval; 2398 return rval;
@@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2267 uint32_t *siter, *diter, dwords; 2421 uint32_t *siter, *diter, dwords;
2268 struct qla_hw_data *ha = vha->hw; 2422 struct qla_hw_data *ha = vha->hw;
2269 2423
2270 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__); 2424 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2425 "Entered %s.\n", __func__);
2271 2426
2272 mcp->mb[0] = MBC_GET_LINK_STATUS; 2427 mcp->mb[0] = MBC_GET_LINK_STATUS;
2273 mcp->mb[2] = MSW(stats_dma); 2428 mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2301 rval = QLA_FUNCTION_FAILED; 2456 rval = QLA_FUNCTION_FAILED;
2302 } else { 2457 } else {
2303 /* Copy over data -- firmware data is LE. */ 2458 /* Copy over data -- firmware data is LE. */
2304 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__); 2459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2460 "Done %s.\n", __func__);
2305 dwords = offsetof(struct link_statistics, unused1) / 4; 2461 dwords = offsetof(struct link_statistics, unused1) / 4;
2306 siter = diter = &stats->link_fail_cnt; 2462 siter = diter = &stats->link_fail_cnt;
2307 while (dwords--) 2463 while (dwords--)
@@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2324 mbx_cmd_t *mcp = &mc; 2480 mbx_cmd_t *mcp = &mc;
2325 uint32_t *siter, *diter, dwords; 2481 uint32_t *siter, *diter, dwords;
2326 2482
2327 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__); 2483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2484 "Entered %s.\n", __func__);
2328 2485
2329 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2486 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2330 mcp->mb[2] = MSW(stats_dma); 2487 mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2346 "Failed mb[0]=%x.\n", mcp->mb[0]); 2503 "Failed mb[0]=%x.\n", mcp->mb[0]);
2347 rval = QLA_FUNCTION_FAILED; 2504 rval = QLA_FUNCTION_FAILED;
2348 } else { 2505 } else {
2349 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__); 2506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2507 "Done %s.\n", __func__);
2350 /* Copy over data -- firmware data is LE. */ 2508 /* Copy over data -- firmware data is LE. */
2351 dwords = sizeof(struct link_statistics) / 4; 2509 dwords = sizeof(struct link_statistics) / 4;
2352 siter = diter = &stats->link_fail_cnt; 2510 siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp)
2375 struct qla_hw_data *ha = vha->hw; 2533 struct qla_hw_data *ha = vha->hw;
2376 struct req_que *req = vha->req; 2534 struct req_que *req = vha->req;
2377 2535
2378 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__); 2536 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2537 "Entered %s.\n", __func__);
2379 2538
2380 spin_lock_irqsave(&ha->hardware_lock, flags); 2539 spin_lock_irqsave(&ha->hardware_lock, flags);
2381 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2540 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp)
2404 abt->port_id[0] = fcport->d_id.b.al_pa; 2563 abt->port_id[0] = fcport->d_id.b.al_pa;
2405 abt->port_id[1] = fcport->d_id.b.area; 2564 abt->port_id[1] = fcport->d_id.b.area;
2406 abt->port_id[2] = fcport->d_id.b.domain; 2565 abt->port_id[2] = fcport->d_id.b.domain;
2407 abt->vp_index = fcport->vp_idx; 2566 abt->vp_index = fcport->vha->vp_idx;
2408 2567
2409 abt->req_que_no = cpu_to_le16(req->id); 2568 abt->req_que_no = cpu_to_le16(req->id);
2410 2569
@@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp)
2423 le16_to_cpu(abt->nport_handle)); 2582 le16_to_cpu(abt->nport_handle));
2424 rval = QLA_FUNCTION_FAILED; 2583 rval = QLA_FUNCTION_FAILED;
2425 } else { 2584 } else {
2426 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__); 2585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
2586 "Done %s.\n", __func__);
2427 } 2587 }
2428 2588
2429 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2589 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2455 ha = vha->hw; 2615 ha = vha->hw;
2456 req = vha->req; 2616 req = vha->req;
2457 2617
2458 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__); 2618 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
2619 "Entered %s.\n", __func__);
2459 2620
2460 if (ha->flags.cpu_affinity_enabled) 2621 if (ha->flags.cpu_affinity_enabled)
2461 rsp = ha->rsp_q_map[tag + 1]; 2622 rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2478 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 2639 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2479 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 2640 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2480 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 2641 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2481 tsk->p.tsk.vp_index = fcport->vp_idx; 2642 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
2482 if (type == TCF_LUN_RESET) { 2643 if (type == TCF_LUN_RESET) {
2483 int_to_scsilun(l, &tsk->p.tsk.lun); 2644 int_to_scsilun(l, &tsk->p.tsk.lun);
2484 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 2645 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2504 } else if (le16_to_cpu(sts->scsi_status) & 2665 } else if (le16_to_cpu(sts->scsi_status) &
2505 SS_RESPONSE_INFO_LEN_VALID) { 2666 SS_RESPONSE_INFO_LEN_VALID) {
2506 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2667 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2507 ql_dbg(ql_dbg_mbx, vha, 0x1097, 2668 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
2508 "Ignoring inconsistent data length -- not enough " 2669 "Ignoring inconsistent data length -- not enough "
2509 "response info (%d).\n", 2670 "response info (%d).\n",
2510 le32_to_cpu(sts->rsp_data_len)); 2671 le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2523 ql_dbg(ql_dbg_mbx, vha, 0x1099, 2684 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2524 "Failed to issue marker IOCB (%x).\n", rval2); 2685 "Failed to issue marker IOCB (%x).\n", rval2);
2525 } else { 2686 } else {
2526 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__); 2687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
2688 "Done %s.\n", __func__);
2527 } 2689 }
2528 2690
2529 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 2691 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2564 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2726 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2565 return QLA_FUNCTION_FAILED; 2727 return QLA_FUNCTION_FAILED;
2566 2728
2567 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__); 2729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
2730 "Entered %s.\n", __func__);
2568 2731
2569 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2732 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2570 mcp->out_mb = MBX_0; 2733 mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2576 if (rval != QLA_SUCCESS) { 2739 if (rval != QLA_SUCCESS) {
2577 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 2740 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2578 } else { 2741 } else {
2579 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__); 2742 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
2743 "Done %s.\n", __func__);
2580 } 2744 }
2581 2745
2582 return rval; 2746 return rval;
@@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2596 mbx_cmd_t mc; 2760 mbx_cmd_t mc;
2597 mbx_cmd_t *mcp = &mc; 2761 mbx_cmd_t *mcp = &mc;
2598 2762
2599 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__); 2763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
2764 "Entered %s.\n", __func__);
2600 2765
2601 mcp->mb[0] = MBC_SERDES_PARAMS; 2766 mcp->mb[0] = MBC_SERDES_PARAMS;
2602 mcp->mb[1] = BIT_0; 2767 mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2615 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 2780 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2616 } else { 2781 } else {
2617 /*EMPTY*/ 2782 /*EMPTY*/
2618 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__); 2783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
2784 "Done %s.\n", __func__);
2619 } 2785 }
2620 2786
2621 return rval; 2787 return rval;
@@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2631 if (!IS_FWI2_CAPABLE(vha->hw)) 2797 if (!IS_FWI2_CAPABLE(vha->hw))
2632 return QLA_FUNCTION_FAILED; 2798 return QLA_FUNCTION_FAILED;
2633 2799
2634 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__); 2800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
2801 "Entered %s.\n", __func__);
2635 2802
2636 mcp->mb[0] = MBC_STOP_FIRMWARE; 2803 mcp->mb[0] = MBC_STOP_FIRMWARE;
2637 mcp->mb[1] = 0; 2804 mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2646 if (mcp->mb[0] == MBS_INVALID_COMMAND) 2813 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2647 rval = QLA_INVALID_COMMAND; 2814 rval = QLA_INVALID_COMMAND;
2648 } else { 2815 } else {
2649 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__); 2816 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
2817 "Done %s.\n", __func__);
2650 } 2818 }
2651 2819
2652 return rval; 2820 return rval;
@@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2660 mbx_cmd_t mc; 2828 mbx_cmd_t mc;
2661 mbx_cmd_t *mcp = &mc; 2829 mbx_cmd_t *mcp = &mc;
2662 2830
2663 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__); 2831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
2832 "Entered %s.\n", __func__);
2664 2833
2665 if (!IS_FWI2_CAPABLE(vha->hw)) 2834 if (!IS_FWI2_CAPABLE(vha->hw))
2666 return QLA_FUNCTION_FAILED; 2835 return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2686 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2855 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2687 rval, mcp->mb[0], mcp->mb[1]); 2856 rval, mcp->mb[0], mcp->mb[1]);
2688 } else { 2857 } else {
2689 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__); 2858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
2859 "Done %s.\n", __func__);
2690 } 2860 }
2691 2861
2692 return rval; 2862 return rval;
@@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2699 mbx_cmd_t mc; 2869 mbx_cmd_t mc;
2700 mbx_cmd_t *mcp = &mc; 2870 mbx_cmd_t *mcp = &mc;
2701 2871
2702 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__); 2872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
2873 "Entered %s.\n", __func__);
2703 2874
2704 if (!IS_FWI2_CAPABLE(vha->hw)) 2875 if (!IS_FWI2_CAPABLE(vha->hw))
2705 return QLA_FUNCTION_FAILED; 2876 return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2719 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2890 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2720 rval, mcp->mb[0], mcp->mb[1]); 2891 rval, mcp->mb[0], mcp->mb[1]);
2721 } else { 2892 } else {
2722 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__); 2893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
2894 "Done %s.\n", __func__);
2723 } 2895 }
2724 2896
2725 return rval; 2897 return rval;
@@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2733 mbx_cmd_t mc; 2905 mbx_cmd_t mc;
2734 mbx_cmd_t *mcp = &mc; 2906 mbx_cmd_t *mcp = &mc;
2735 2907
2736 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__); 2908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
2909 "Entered %s.\n", __func__);
2737 2910
2738 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 2911 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
2739 !IS_QLA83XX(vha->hw)) 2912 !IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2764 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2937 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2765 rval, mcp->mb[0], mcp->mb[1]); 2938 rval, mcp->mb[0], mcp->mb[1]);
2766 } else { 2939 } else {
2767 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__); 2940 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
2941 "Done %s.\n", __func__);
2768 2942
2769 if (mb) 2943 if (mb)
2770 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2944 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2782 mbx_cmd_t mc; 2956 mbx_cmd_t mc;
2783 mbx_cmd_t *mcp = &mc; 2957 mbx_cmd_t *mcp = &mc;
2784 2958
2785 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__); 2959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
2960 "Entered %s.\n", __func__);
2786 2961
2787 if (!IS_FWI2_CAPABLE(vha->hw)) 2962 if (!IS_FWI2_CAPABLE(vha->hw))
2788 return QLA_FUNCTION_FAILED; 2963 return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2804 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2979 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2805 rval, mcp->mb[0], mcp->mb[1]); 2980 rval, mcp->mb[0], mcp->mb[1]);
2806 } else { 2981 } else {
2807 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__); 2982 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
2983 "Done %s.\n", __func__);
2808 2984
2809 if (wr) 2985 if (wr)
2810 *wr = (uint64_t) mcp->mb[5] << 48 | 2986 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2829 mbx_cmd_t mc; 3005 mbx_cmd_t mc;
2830 mbx_cmd_t *mcp = &mc; 3006 mbx_cmd_t *mcp = &mc;
2831 3007
2832 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__); 3008 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3009 "Entered %s.\n", __func__);
2833 3010
2834 if (!IS_IIDMA_CAPABLE(vha->hw)) 3011 if (!IS_IIDMA_CAPABLE(vha->hw))
2835 return QLA_FUNCTION_FAILED; 3012 return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2854 if (rval != QLA_SUCCESS) { 3031 if (rval != QLA_SUCCESS) {
2855 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3032 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2856 } else { 3033 } else {
2857 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__); 3034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3035 "Done %s.\n", __func__);
2858 if (port_speed) 3036 if (port_speed)
2859 *port_speed = mcp->mb[3]; 3037 *port_speed = mcp->mb[3];
2860 } 3038 }
@@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2870 mbx_cmd_t mc; 3048 mbx_cmd_t mc;
2871 mbx_cmd_t *mcp = &mc; 3049 mbx_cmd_t *mcp = &mc;
2872 3050
2873 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__); 3051 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3052 "Entered %s.\n", __func__);
2874 3053
2875 if (!IS_IIDMA_CAPABLE(vha->hw)) 3054 if (!IS_IIDMA_CAPABLE(vha->hw))
2876 return QLA_FUNCTION_FAILED; 3055 return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2897 } 3076 }
2898 3077
2899 if (rval != QLA_SUCCESS) { 3078 if (rval != QLA_SUCCESS) {
2900 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); 3079 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3080 "Failed=%x.\n", rval);
2901 } else { 3081 } else {
2902 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__); 3082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3083 "Done %s.\n", __func__);
2903 } 3084 }
2904 3085
2905 return rval; 3086 return rval;
@@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2915 scsi_qla_host_t *vp; 3096 scsi_qla_host_t *vp;
2916 unsigned long flags; 3097 unsigned long flags;
2917 3098
2918 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__); 3099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3100 "Entered %s.\n", __func__);
2919 3101
2920 if (rptid_entry->entry_status != 0) 3102 if (rptid_entry->entry_status != 0)
2921 return; 3103 return;
2922 3104
2923 if (rptid_entry->format == 0) { 3105 if (rptid_entry->format == 0) {
2924 ql_dbg(ql_dbg_mbx, vha, 0x10b7, 3106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
2925 "Format 0 : Number of VPs setup %d, number of " 3107 "Format 0 : Number of VPs setup %d, number of "
2926 "VPs acquired %d.\n", 3108 "VPs acquired %d.\n",
2927 MSB(le16_to_cpu(rptid_entry->vp_count)), 3109 MSB(le16_to_cpu(rptid_entry->vp_count)),
2928 LSB(le16_to_cpu(rptid_entry->vp_count))); 3110 LSB(le16_to_cpu(rptid_entry->vp_count)));
2929 ql_dbg(ql_dbg_mbx, vha, 0x10b8, 3111 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
2930 "Primary port id %02x%02x%02x.\n", 3112 "Primary port id %02x%02x%02x.\n",
2931 rptid_entry->port_id[2], rptid_entry->port_id[1], 3113 rptid_entry->port_id[2], rptid_entry->port_id[1],
2932 rptid_entry->port_id[0]); 3114 rptid_entry->port_id[0]);
2933 } else if (rptid_entry->format == 1) { 3115 } else if (rptid_entry->format == 1) {
2934 vp_idx = LSB(stat); 3116 vp_idx = LSB(stat);
2935 ql_dbg(ql_dbg_mbx, vha, 0x10b9, 3117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
2936 "Format 1: VP[%d] enabled - status %d - with " 3118 "Format 1: VP[%d] enabled - status %d - with "
2937 "port id %02x%02x%02x.\n", vp_idx, MSB(stat), 3119 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2938 rptid_entry->port_id[2], rptid_entry->port_id[1], 3120 rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2999 3181
3000 /* This can be called by the parent */ 3182 /* This can be called by the parent */
3001 3183
3002 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__); 3184 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3185 "Entered %s.\n", __func__);
3003 3186
3004 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 3187 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3005 if (!vpmod) { 3188 if (!vpmod) {
@@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3015 vpmod->vp_count = 1; 3198 vpmod->vp_count = 1;
3016 vpmod->vp_index1 = vha->vp_idx; 3199 vpmod->vp_index1 = vha->vp_idx;
3017 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 3200 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3201
3202 qlt_modify_vp_config(vha, vpmod);
3203
3018 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 3204 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3019 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 3205 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3020 vpmod->entry_count = 1; 3206 vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3035 rval = QLA_FUNCTION_FAILED; 3221 rval = QLA_FUNCTION_FAILED;
3036 } else { 3222 } else {
3037 /* EMPTY */ 3223 /* EMPTY */
3038 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__); 3224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3225 "Done %s.\n", __func__);
3039 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 3226 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3040 } 3227 }
3041 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 3228 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3069 int vp_index = vha->vp_idx; 3256 int vp_index = vha->vp_idx;
3070 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3257 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3071 3258
3072 ql_dbg(ql_dbg_mbx, vha, 0x10c1, 3259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3073 "Entered %s enabling index %d.\n", __func__, vp_index); 3260 "Entered %s enabling index %d.\n", __func__, vp_index);
3074 3261
3075 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3262 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3112 le16_to_cpu(vce->comp_status)); 3299 le16_to_cpu(vce->comp_status));
3113 rval = QLA_FUNCTION_FAILED; 3300 rval = QLA_FUNCTION_FAILED;
3114 } else { 3301 } else {
3115 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__); 3302 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
3303 "Done %s.\n", __func__);
3116 } 3304 }
3117 3305
3118 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 3306 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3149 mbx_cmd_t mc; 3337 mbx_cmd_t mc;
3150 mbx_cmd_t *mcp = &mc; 3338 mbx_cmd_t *mcp = &mc;
3151 3339
3152 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__); 3340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
3153 3341 "Entered %s.\n", __func__);
3154 /*
3155 * This command is implicitly executed by firmware during login for the
3156 * physical hosts
3157 */
3158 if (vp_idx == 0)
3159 return QLA_FUNCTION_FAILED;
3160 3342
3161 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 3343 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3162 mcp->mb[1] = format; 3344 mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3185 mbx_cmd_t mc; 3367 mbx_cmd_t mc;
3186 mbx_cmd_t *mcp = &mc; 3368 mbx_cmd_t *mcp = &mc;
3187 3369
3188 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__); 3370 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
3371 "Entered %s.\n", __func__);
3189 3372
3190 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 3373 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3191 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 3374 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3219 ql_dbg(ql_dbg_mbx, vha, 0x1008, 3402 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3220 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3403 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3221 } else { 3404 } else {
3222 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__); 3405 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
3406 "Done %s.\n", __func__);
3223 } 3407 }
3224 3408
3225 return rval; 3409 return rval;
@@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3244 unsigned long flags; 3428 unsigned long flags;
3245 struct qla_hw_data *ha = vha->hw; 3429 struct qla_hw_data *ha = vha->hw;
3246 3430
3247 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__); 3431 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
3432 "Entered %s.\n", __func__);
3248 3433
3249 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3434 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3250 if (mn == NULL) { 3435 if (mn == NULL) {
@@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3285 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 3470 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3286 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3471 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3287 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3472 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3288 ql_dbg(ql_dbg_mbx, vha, 0x10ce, 3473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
3289 "cs=%x fc=%x.\n", status[0], status[1]); 3474 "cs=%x fc=%x.\n", status[0], status[1]);
3290 3475
3291 if (status[0] != CS_COMPLETE) { 3476 if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3299 retry = 1; 3484 retry = 1;
3300 } 3485 }
3301 } else { 3486 } else {
3302 ql_dbg(ql_dbg_mbx, vha, 0x10d0, 3487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
3303 "Firmware updated to %x.\n", 3488 "Firmware updated to %x.\n",
3304 le32_to_cpu(mn->p.rsp.fw_ver)); 3489 le32_to_cpu(mn->p.rsp.fw_ver));
3305 3490
@@ -3316,9 +3501,11 @@ verify_done:
3316 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 3501 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3317 3502
3318 if (rval != QLA_SUCCESS) { 3503 if (rval != QLA_SUCCESS) {
3319 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval); 3504 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
3505 "Failed=%x.\n", rval);
3320 } else { 3506 } else {
3321 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__); 3507 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
3508 "Done %s.\n", __func__);
3322 } 3509 }
3323 3510
3324 return rval; 3511 return rval;
@@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3334 struct device_reg_25xxmq __iomem *reg; 3521 struct device_reg_25xxmq __iomem *reg;
3335 struct qla_hw_data *ha = vha->hw; 3522 struct qla_hw_data *ha = vha->hw;
3336 3523
3337 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__); 3524 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
3525 "Entered %s.\n", __func__);
3338 3526
3339 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3527 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3340 mcp->mb[1] = req->options; 3528 mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3388 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 3576 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3389 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3577 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3390 } else { 3578 } else {
3391 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__); 3579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
3580 "Done %s.\n", __func__);
3392 } 3581 }
3393 3582
3394 return rval; 3583 return rval;
@@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3404 struct device_reg_25xxmq __iomem *reg; 3593 struct device_reg_25xxmq __iomem *reg;
3405 struct qla_hw_data *ha = vha->hw; 3594 struct qla_hw_data *ha = vha->hw;
3406 3595
3407 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__); 3596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
3597 "Entered %s.\n", __func__);
3408 3598
3409 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3599 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3410 mcp->mb[1] = rsp->options; 3600 mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3456 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 3646 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3457 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3647 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3458 } else { 3648 } else {
3459 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__); 3649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
3650 "Done %s.\n", __func__);
3460 } 3651 }
3461 3652
3462 return rval; 3653 return rval;
@@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3469 mbx_cmd_t mc; 3660 mbx_cmd_t mc;
3470 mbx_cmd_t *mcp = &mc; 3661 mbx_cmd_t *mcp = &mc;
3471 3662
3472 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__); 3663 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
3664 "Entered %s.\n", __func__);
3473 3665
3474 mcp->mb[0] = MBC_IDC_ACK; 3666 mcp->mb[0] = MBC_IDC_ACK;
3475 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 3667 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3483 ql_dbg(ql_dbg_mbx, vha, 0x10da, 3675 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3484 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3676 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3485 } else { 3677 } else {
3486 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__); 3678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
3679 "Done %s.\n", __func__);
3487 } 3680 }
3488 3681
3489 return rval; 3682 return rval;
@@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3496 mbx_cmd_t mc; 3689 mbx_cmd_t mc;
3497 mbx_cmd_t *mcp = &mc; 3690 mbx_cmd_t *mcp = &mc;
3498 3691
3499 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__); 3692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
3693 "Entered %s.\n", __func__);
3500 3694
3501 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3695 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3502 return QLA_FUNCTION_FAILED; 3696 return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3514 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3708 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3515 rval, mcp->mb[0], mcp->mb[1]); 3709 rval, mcp->mb[0], mcp->mb[1]);
3516 } else { 3710 } else {
3517 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__); 3711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
3712 "Done %s.\n", __func__);
3518 *sector_size = mcp->mb[1]; 3713 *sector_size = mcp->mb[1];
3519 } 3714 }
3520 3715
@@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3531 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3726 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3532 return QLA_FUNCTION_FAILED; 3727 return QLA_FUNCTION_FAILED;
3533 3728
3534 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__); 3729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
3730 "Entered %s.\n", __func__);
3535 3731
3536 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3732 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3537 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 3733 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3547 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3743 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3548 rval, mcp->mb[0], mcp->mb[1]); 3744 rval, mcp->mb[0], mcp->mb[1]);
3549 } else { 3745 } else {
3550 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__); 3746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
3747 "Done %s.\n", __func__);
3551 } 3748 }
3552 3749
3553 return rval; 3750 return rval;
@@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3563 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3760 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3564 return QLA_FUNCTION_FAILED; 3761 return QLA_FUNCTION_FAILED;
3565 3762
3566 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__); 3763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
3764 "Entered %s.\n", __func__);
3567 3765
3568 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3766 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3569 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 3767 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3582 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3780 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3583 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3781 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3584 } else { 3782 } else {
3585 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__); 3783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
3784 "Done %s.\n", __func__);
3586 } 3785 }
3587 3786
3588 return rval; 3787 return rval;
@@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3595 mbx_cmd_t mc; 3794 mbx_cmd_t mc;
3596 mbx_cmd_t *mcp = &mc; 3795 mbx_cmd_t *mcp = &mc;
3597 3796
3598 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__); 3797 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
3798 "Entered %s.\n", __func__);
3599 3799
3600 mcp->mb[0] = MBC_RESTART_MPI_FW; 3800 mcp->mb[0] = MBC_RESTART_MPI_FW;
3601 mcp->out_mb = MBX_0; 3801 mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3609 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3809 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3610 rval, mcp->mb[0], mcp->mb[1]); 3810 rval, mcp->mb[0], mcp->mb[1]);
3611 } else { 3811 } else {
3612 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__); 3812 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
3813 "Done %s.\n", __func__);
3613 } 3814 }
3614 3815
3615 return rval; 3816 return rval;
@@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3624 mbx_cmd_t *mcp = &mc; 3825 mbx_cmd_t *mcp = &mc;
3625 struct qla_hw_data *ha = vha->hw; 3826 struct qla_hw_data *ha = vha->hw;
3626 3827
3627 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__); 3828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
3829 "Entered %s.\n", __func__);
3628 3830
3629 if (!IS_FWI2_CAPABLE(ha)) 3831 if (!IS_FWI2_CAPABLE(ha))
3630 return QLA_FUNCTION_FAILED; 3832 return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3654 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 3856 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3655 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3857 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3656 } else { 3858 } else {
3657 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__); 3859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
3860 "Done %s.\n", __func__);
3658 } 3861 }
3659 3862
3660 return rval; 3863 return rval;
@@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3669 mbx_cmd_t *mcp = &mc; 3872 mbx_cmd_t *mcp = &mc;
3670 struct qla_hw_data *ha = vha->hw; 3873 struct qla_hw_data *ha = vha->hw;
3671 3874
3672 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__); 3875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
3876 "Entered %s.\n", __func__);
3673 3877
3674 if (!IS_FWI2_CAPABLE(ha)) 3878 if (!IS_FWI2_CAPABLE(ha))
3675 return QLA_FUNCTION_FAILED; 3879 return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3699 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 3903 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3700 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3904 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3701 } else { 3905 } else {
3702 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__); 3906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
3907 "Done %s.\n", __func__);
3703 } 3908 }
3704 3909
3705 return rval; 3910 return rval;
@@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3713 mbx_cmd_t mc; 3918 mbx_cmd_t mc;
3714 mbx_cmd_t *mcp = &mc; 3919 mbx_cmd_t *mcp = &mc;
3715 3920
3716 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__); 3921 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
3922 "Entered %s.\n", __func__);
3717 3923
3718 if (!IS_CNA_CAPABLE(vha->hw)) 3924 if (!IS_CNA_CAPABLE(vha->hw))
3719 return QLA_FUNCTION_FAILED; 3925 return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3735 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3941 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3736 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3942 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3737 } else { 3943 } else {
3738 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__); 3944 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
3945 "Done %s.\n", __func__);
3739 3946
3740 3947
3741 *actual_size = mcp->mb[2] << 2; 3948 *actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3752 mbx_cmd_t mc; 3959 mbx_cmd_t mc;
3753 mbx_cmd_t *mcp = &mc; 3960 mbx_cmd_t *mcp = &mc;
3754 3961
3755 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__); 3962 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
3963 "Entered %s.\n", __func__);
3756 3964
3757 if (!IS_CNA_CAPABLE(vha->hw)) 3965 if (!IS_CNA_CAPABLE(vha->hw))
3758 return QLA_FUNCTION_FAILED; 3966 return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3775 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3983 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3776 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3984 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3777 } else { 3985 } else {
3778 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__); 3986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
3987 "Done %s.\n", __func__);
3779 } 3988 }
3780 3989
3781 return rval; 3990 return rval;
@@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3788 mbx_cmd_t mc; 3997 mbx_cmd_t mc;
3789 mbx_cmd_t *mcp = &mc; 3998 mbx_cmd_t *mcp = &mc;
3790 3999
3791 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__); 4000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4001 "Entered %s.\n", __func__);
3792 4002
3793 if (!IS_FWI2_CAPABLE(vha->hw)) 4003 if (!IS_FWI2_CAPABLE(vha->hw))
3794 return QLA_FUNCTION_FAILED; 4004 return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3805 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 4015 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3806 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4016 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3807 } else { 4017 } else {
3808 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__); 4018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4019 "Done %s.\n", __func__);
3809 *data = mcp->mb[3] << 16 | mcp->mb[2]; 4020 *data = mcp->mb[3] << 16 | mcp->mb[2];
3810 } 4021 }
3811 4022
@@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3821 mbx_cmd_t *mcp = &mc; 4032 mbx_cmd_t *mcp = &mc;
3822 uint32_t iter_cnt = 0x1; 4033 uint32_t iter_cnt = 0x1;
3823 4034
3824 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__); 4035 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4036 "Entered %s.\n", __func__);
3825 4037
3826 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4038 memset(mcp->mb, 0 , sizeof(mcp->mb));
3827 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 4039 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3865 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 4077 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3866 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 4078 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3867 } else { 4079 } else {
3868 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__); 4080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4081 "Done %s.\n", __func__);
3869 } 4082 }
3870 4083
3871 /* Copy mailbox information */ 4084 /* Copy mailbox information */
@@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3882 mbx_cmd_t *mcp = &mc; 4095 mbx_cmd_t *mcp = &mc;
3883 struct qla_hw_data *ha = vha->hw; 4096 struct qla_hw_data *ha = vha->hw;
3884 4097
3885 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__); 4098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4099 "Entered %s.\n", __func__);
3886 4100
3887 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4101 memset(mcp->mb, 0 , sizeof(mcp->mb));
3888 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 4102 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3926 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4140 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3927 rval, mcp->mb[0], mcp->mb[1]); 4141 rval, mcp->mb[0], mcp->mb[1]);
3928 } else { 4142 } else {
3929 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__); 4143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
4144 "Done %s.\n", __func__);
3930 } 4145 }
3931 4146
3932 /* Copy mailbox information */ 4147 /* Copy mailbox information */
@@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3941 mbx_cmd_t mc; 4156 mbx_cmd_t mc;
3942 mbx_cmd_t *mcp = &mc; 4157 mbx_cmd_t *mcp = &mc;
3943 4158
3944 ql_dbg(ql_dbg_mbx, vha, 0x10fd, 4159 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
3945 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 4160 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3946 4161
3947 mcp->mb[0] = MBC_ISP84XX_RESET; 4162 mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3955 if (rval != QLA_SUCCESS) 4170 if (rval != QLA_SUCCESS)
3956 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 4171 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3957 else 4172 else
3958 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__); 4173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
4174 "Done %s.\n", __func__);
3959 4175
3960 return rval; 4176 return rval;
3961} 4177}
@@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3967 mbx_cmd_t mc; 4183 mbx_cmd_t mc;
3968 mbx_cmd_t *mcp = &mc; 4184 mbx_cmd_t *mcp = &mc;
3969 4185
3970 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__); 4186 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
4187 "Entered %s.\n", __func__);
3971 4188
3972 if (!IS_FWI2_CAPABLE(vha->hw)) 4189 if (!IS_FWI2_CAPABLE(vha->hw))
3973 return QLA_FUNCTION_FAILED; 4190 return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3986 ql_dbg(ql_dbg_mbx, vha, 0x1101, 4203 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3987 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4204 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3988 } else { 4205 } else {
3989 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__); 4206 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
4207 "Done %s.\n", __func__);
3990 } 4208 }
3991 4209
3992 return rval; 4210 return rval;
@@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4003 4221
4004 rval = QLA_SUCCESS; 4222 rval = QLA_SUCCESS;
4005 4223
4006 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__); 4224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
4225 "Entered %s.\n", __func__);
4007 4226
4008 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 4227 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
4009 4228
@@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4046 ql_dbg(ql_dbg_mbx, vha, 0x1104, 4265 ql_dbg(ql_dbg_mbx, vha, 0x1104,
4047 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 4266 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
4048 } else { 4267 } else {
4049 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__); 4268 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
4269 "Done %s.\n", __func__);
4050 } 4270 }
4051 4271
4052 return rval; 4272 return rval;
@@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4060 mbx_cmd_t *mcp = &mc; 4280 mbx_cmd_t *mcp = &mc;
4061 struct qla_hw_data *ha = vha->hw; 4281 struct qla_hw_data *ha = vha->hw;
4062 4282
4063 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__); 4283 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
4284 "Entered %s.\n", __func__);
4064 4285
4065 if (!IS_FWI2_CAPABLE(ha)) 4286 if (!IS_FWI2_CAPABLE(ha))
4066 return QLA_FUNCTION_FAILED; 4287 return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4078 ql_dbg(ql_dbg_mbx, vha, 0x1107, 4299 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4079 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4300 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4080 } else { 4301 } else {
4081 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__); 4302 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
4303 "Done %s.\n", __func__);
4082 if (mcp->mb[1] != 0x7) 4304 if (mcp->mb[1] != 0x7)
4083 ha->link_data_rate = mcp->mb[1]; 4305 ha->link_data_rate = mcp->mb[1];
4084 } 4306 }
@@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4094 mbx_cmd_t *mcp = &mc; 4316 mbx_cmd_t *mcp = &mc;
4095 struct qla_hw_data *ha = vha->hw; 4317 struct qla_hw_data *ha = vha->hw;
4096 4318
4097 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__); 4319 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
4320 "Entered %s.\n", __func__);
4098 4321
4099 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 4322 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
4100 return QLA_FUNCTION_FAILED; 4323 return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4113 /* Copy all bits to preserve original value */ 4336 /* Copy all bits to preserve original value */
4114 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 4337 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4115 4338
4116 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__); 4339 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
4340 "Done %s.\n", __func__);
4117 } 4341 }
4118 return rval; 4342 return rval;
4119} 4343}
@@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4125 mbx_cmd_t mc; 4349 mbx_cmd_t mc;
4126 mbx_cmd_t *mcp = &mc; 4350 mbx_cmd_t *mcp = &mc;
4127 4351
4128 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__); 4352 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
4353 "Entered %s.\n", __func__);
4129 4354
4130 mcp->mb[0] = MBC_SET_PORT_CONFIG; 4355 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4131 /* Copy all bits to preserve original setting */ 4356 /* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4140 ql_dbg(ql_dbg_mbx, vha, 0x110d, 4365 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4141 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4366 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4142 } else 4367 } else
4143 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__); 4368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
4369 "Done %s.\n", __func__);
4144 4370
4145 return rval; 4371 return rval;
4146} 4372}
@@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4155 mbx_cmd_t *mcp = &mc; 4381 mbx_cmd_t *mcp = &mc;
4156 struct qla_hw_data *ha = vha->hw; 4382 struct qla_hw_data *ha = vha->hw;
4157 4383
4158 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__); 4384 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
4385 "Entered %s.\n", __func__);
4159 4386
4160 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 4387 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4161 return QLA_FUNCTION_FAILED; 4388 return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4183 if (rval != QLA_SUCCESS) { 4410 if (rval != QLA_SUCCESS) {
4184 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 4411 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4185 } else { 4412 } else {
4186 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__); 4413 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
4414 "Done %s.\n", __func__);
4187 } 4415 }
4188 4416
4189 return rval; 4417 return rval;
@@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4196 uint8_t byte; 4424 uint8_t byte;
4197 struct qla_hw_data *ha = vha->hw; 4425 struct qla_hw_data *ha = vha->hw;
4198 4426
4199 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__); 4427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
4428 "Entered %s.\n", __func__);
4200 4429
4201 /* Integer part */ 4430 /* Integer part */
4202 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); 4431 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4216 } 4445 }
4217 *frac = (byte >> 6) * 25; 4446 *frac = (byte >> 6) * 25;
4218 4447
4219 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__); 4448 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
4449 "Done %s.\n", __func__);
4220fail: 4450fail:
4221 return rval; 4451 return rval;
4222} 4452}
@@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4229 mbx_cmd_t mc; 4459 mbx_cmd_t mc;
4230 mbx_cmd_t *mcp = &mc; 4460 mbx_cmd_t *mcp = &mc;
4231 4461
4232 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__); 4462 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
4463 "Entered %s.\n", __func__);
4233 4464
4234 if (!IS_FWI2_CAPABLE(ha)) 4465 if (!IS_FWI2_CAPABLE(ha))
4235 return QLA_FUNCTION_FAILED; 4466 return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4248 ql_dbg(ql_dbg_mbx, vha, 0x1016, 4479 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4249 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4480 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4250 } else { 4481 } else {
4251 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__); 4482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
4483 "Done %s.\n", __func__);
4252 } 4484 }
4253 4485
4254 return rval; 4486 return rval;
@@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4262 mbx_cmd_t mc; 4494 mbx_cmd_t mc;
4263 mbx_cmd_t *mcp = &mc; 4495 mbx_cmd_t *mcp = &mc;
4264 4496
4265 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__); 4497 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
4498 "Entered %s.\n", __func__);
4266 4499
4267 if (!IS_QLA82XX(ha)) 4500 if (!IS_QLA82XX(ha))
4268 return QLA_FUNCTION_FAILED; 4501 return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4281 ql_dbg(ql_dbg_mbx, vha, 0x100c, 4514 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4282 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4515 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4283 } else { 4516 } else {
4284 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__); 4517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
4518 "Done %s.\n", __func__);
4285 } 4519 }
4286 4520
4287 return rval; 4521 return rval;
@@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4295 mbx_cmd_t *mcp = &mc; 4529 mbx_cmd_t *mcp = &mc;
4296 int rval = QLA_FUNCTION_FAILED; 4530 int rval = QLA_FUNCTION_FAILED;
4297 4531
4298 ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__); 4532 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
4533 "Entered %s.\n", __func__);
4299 4534
4300 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4535 memset(mcp->mb, 0 , sizeof(mcp->mb));
4301 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 4536 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4318 (mcp->mb[1] << 16) | mcp->mb[0], 4553 (mcp->mb[1] << 16) | mcp->mb[0],
4319 (mcp->mb[3] << 16) | mcp->mb[2]); 4554 (mcp->mb[3] << 16) | mcp->mb[2]);
4320 } else { 4555 } else {
4321 ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__); 4556 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
4557 "Done %s.\n", __func__);
4322 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 4558 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4323 if (!ha->md_template_size) { 4559 if (!ha->md_template_size) {
4324 ql_dbg(ql_dbg_mbx, vha, 0x1122, 4560 ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
4337 mbx_cmd_t *mcp = &mc; 4573 mbx_cmd_t *mcp = &mc;
4338 int rval = QLA_FUNCTION_FAILED; 4574 int rval = QLA_FUNCTION_FAILED;
4339 4575
4340 ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__); 4576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
4577 "Entered %s.\n", __func__);
4341 4578
4342 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 4579 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4343 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 4580 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
4372 ((mcp->mb[1] << 16) | mcp->mb[0]), 4609 ((mcp->mb[1] << 16) | mcp->mb[0]),
4373 ((mcp->mb[3] << 16) | mcp->mb[2])); 4610 ((mcp->mb[3] << 16) | mcp->mb[2]));
4374 } else 4611 } else
4375 ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__); 4612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
4613 "Done %s.\n", __func__);
4376 return rval; 4614 return rval;
4377} 4615}
4378 4616
@@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4387 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 4625 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4388 return QLA_FUNCTION_FAILED; 4626 return QLA_FUNCTION_FAILED;
4389 4627
4390 ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__); 4628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
4629 "Entered %s.\n", __func__);
4391 4630
4392 memset(mcp, 0, sizeof(mbx_cmd_t)); 4631 memset(mcp, 0, sizeof(mbx_cmd_t));
4393 mcp->mb[0] = MBC_SET_LED_CONFIG; 4632 mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4412 ql_dbg(ql_dbg_mbx, vha, 0x1134, 4651 ql_dbg(ql_dbg_mbx, vha, 0x1134,
4413 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4652 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4414 } else { 4653 } else {
4415 ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__); 4654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
4655 "Done %s.\n", __func__);
4416 } 4656 }
4417 4657
4418 return rval; 4658 return rval;
@@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4429 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 4669 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4430 return QLA_FUNCTION_FAILED; 4670 return QLA_FUNCTION_FAILED;
4431 4671
4432 ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__); 4672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
4673 "Entered %s.\n", __func__);
4433 4674
4434 memset(mcp, 0, sizeof(mbx_cmd_t)); 4675 memset(mcp, 0, sizeof(mbx_cmd_t));
4435 mcp->mb[0] = MBC_GET_LED_CONFIG; 4676 mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4454 led_cfg[4] = mcp->mb[5]; 4695 led_cfg[4] = mcp->mb[5];
4455 led_cfg[5] = mcp->mb[6]; 4696 led_cfg[5] = mcp->mb[6];
4456 } 4697 }
4457 ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__); 4698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
4699 "Done %s.\n", __func__);
4458 } 4700 }
4459 4701
4460 return rval; 4702 return rval;
@@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4471 if (!IS_QLA82XX(ha)) 4713 if (!IS_QLA82XX(ha))
4472 return QLA_FUNCTION_FAILED; 4714 return QLA_FUNCTION_FAILED;
4473 4715
4474 ql_dbg(ql_dbg_mbx, vha, 0x1127, 4716 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
4475 "Entered %s.\n", __func__); 4717 "Entered %s.\n", __func__);
4476 4718
4477 memset(mcp, 0, sizeof(mbx_cmd_t)); 4719 memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4491 ql_dbg(ql_dbg_mbx, vha, 0x1128, 4733 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4492 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4734 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4493 } else { 4735 } else {
4494 ql_dbg(ql_dbg_mbx, vha, 0x1129, 4736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
4495 "Done %s.\n", __func__); 4737 "Done %s.\n", __func__);
4496 } 4738 }
4497 4739
@@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4509 if (!IS_QLA83XX(ha)) 4751 if (!IS_QLA83XX(ha))
4510 return QLA_FUNCTION_FAILED; 4752 return QLA_FUNCTION_FAILED;
4511 4753
4512 ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__); 4754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
4755 "Entered %s.\n", __func__);
4513 4756
4514 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 4757 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
4515 mcp->mb[1] = LSW(reg); 4758 mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4527 ql_dbg(ql_dbg_mbx, vha, 0x1131, 4770 ql_dbg(ql_dbg_mbx, vha, 0x1131,
4528 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4771 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4529 } else { 4772 } else {
4530 ql_dbg(ql_dbg_mbx, vha, 0x1132, 4773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
4531 "Done %s.\n", __func__); 4774 "Done %s.\n", __func__);
4532 } 4775 }
4533 4776
@@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4543 mbx_cmd_t *mcp = &mc; 4786 mbx_cmd_t *mcp = &mc;
4544 4787
4545 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 4788 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4546 ql_dbg(ql_dbg_mbx, vha, 0x113b, 4789 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
4547 "Implicit LOGO Unsupported.\n"); 4790 "Implicit LOGO Unsupported.\n");
4548 return QLA_FUNCTION_FAILED; 4791 return QLA_FUNCTION_FAILED;
4549 } 4792 }
4550 4793
4551 4794
4552 ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n", __func__); 4795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
4796 "Entering %s.\n", __func__);
4553 4797
4554 /* Perform Implicit LOGO. */ 4798 /* Perform Implicit LOGO. */
4555 mcp->mb[0] = MBC_PORT_LOGOUT; 4799 mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4564 ql_dbg(ql_dbg_mbx, vha, 0x113d, 4808 ql_dbg(ql_dbg_mbx, vha, 0x113d,
4565 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4809 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4566 else 4810 else
4567 ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__); 4811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
4812 "Done %s.\n", __func__);
4568 4813
4569 return rval; 4814 return rval;
4570} 4815}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa062a1b0ca4..3e8b32419e68 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,6 +6,7 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h" 8#include "qla_gbl.h"
9#include "qla_target.h"
9 10
10#include <linux/moduleparam.h> 11#include <linux/moduleparam.h>
11#include <linux/vmalloc.h> 12#include <linux/vmalloc.h>
@@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
49 50
50 spin_lock_irqsave(&ha->vport_slock, flags); 51 spin_lock_irqsave(&ha->vport_slock, flags);
51 list_add_tail(&vha->list, &ha->vp_list); 52 list_add_tail(&vha->list, &ha->vp_list);
53
54 qlt_update_vp_map(vha, SET_VP_IDX);
55
52 spin_unlock_irqrestore(&ha->vport_slock, flags); 56 spin_unlock_irqrestore(&ha->vport_slock, flags);
53 57
54 mutex_unlock(&ha->vport_lock); 58 mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
79 spin_lock_irqsave(&ha->vport_slock, flags); 83 spin_lock_irqsave(&ha->vport_slock, flags);
80 } 84 }
81 list_del(&vha->list); 85 list_del(&vha->list);
86 qlt_update_vp_map(vha, RESET_VP_IDX);
82 spin_unlock_irqrestore(&ha->vport_slock, flags); 87 spin_unlock_irqrestore(&ha->vport_slock, flags);
83 88
84 vp_id = vha->vp_idx; 89 vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
134 list_for_each_entry(fcport, &vha->vp_fcports, list) { 139 list_for_each_entry(fcport, &vha->vp_fcports, list) {
135 ql_dbg(ql_dbg_vport, vha, 0xa001, 140 ql_dbg(ql_dbg_vport, vha, 0xa001,
136 "Marking port dead, loop_id=0x%04x : %x.\n", 141 "Marking port dead, loop_id=0x%04x : %x.\n",
137 fcport->loop_id, fcport->vp_idx); 142 fcport->loop_id, fcport->vha->vp_idx);
138 143
139 qla2x00_mark_device_lost(vha, fcport, 0, 0); 144 qla2x00_mark_device_lost(vha, fcport, 0, 0);
140 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 145 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
150 atomic_set(&vha->loop_state, LOOP_DOWN); 155 atomic_set(&vha->loop_state, LOOP_DOWN);
151 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 156 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
152 157
158 /* Remove port id from vp target map */
159 qlt_update_vp_map(vha, RESET_AL_PA);
160
153 qla2x00_mark_vp_devices_dead(vha); 161 qla2x00_mark_vp_devices_dead(vha);
154 atomic_set(&vha->vp_state, VP_FAILED); 162 atomic_set(&vha->vp_state, VP_FAILED);
155 vha->flags.management_server_logged_in = 0; 163 vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
295static int 303static int
296qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 304qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
297{ 305{
298 ql_dbg(ql_dbg_dpc, vha, 0x4012, 306 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
299 "Entering %s.\n", __func__); 307 "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
300 ql_dbg(ql_dbg_dpc, vha, 0x4013,
301 "vp_flags: 0x%lx.\n", vha->vp_flags);
302 308
303 qla2x00_do_work(vha); 309 qla2x00_do_work(vha);
304 310
@@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
348 } 354 }
349 } 355 }
350 356
351 ql_dbg(ql_dbg_dpc, vha, 0x401c, 357 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
352 "Exiting %s.\n", __func__); 358 "Exiting %s.\n", __func__);
353 return 0; 359 return 0;
354} 360}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de722a933438..caf627ba7fa8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1190 } 1190 }
1191 1191
1192 /* Offset in flash = lower 16 bits 1192 /* Offset in flash = lower 16 bits
1193 * Number of enteries = upper 16 bits 1193 * Number of entries = upper 16 bits
1194 */ 1194 */
1195 offset = n & 0xffffU; 1195 offset = n & 0xffffU;
1196 n = (n >> 16) & 0xffffU; 1196 n = (n >> 16) & 0xffffU;
1197 1197
1198 /* number of addr/value pair should not exceed 1024 enteries */ 1198 /* number of addr/value pair should not exceed 1024 entries */
1199 if (n >= 1024) { 1199 if (n >= 1024) {
1200 ql_log(ql_log_fatal, vha, 0x0071, 1200 ql_log(ql_log_fatal, vha, 0x0071,
1201 "Card flash not initialized:n=0x%x.\n", n); 1201 "Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
2050 2050
2051 rsp = (struct rsp_que *) dev_id; 2051 rsp = (struct rsp_que *) dev_id;
2052 if (!rsp) { 2052 if (!rsp) {
2053 ql_log(ql_log_info, NULL, 0xb054, 2053 ql_log(ql_log_info, NULL, 0xb053,
2054 "%s: NULL response queue pointer.\n", __func__); 2054 "%s: NULL response queue pointer.\n", __func__);
2055 return IRQ_NONE; 2055 return IRQ_NONE;
2056 } 2056 }
@@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2446 2446
2447 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2447 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2448 ql_log(ql_log_info, vha, 0x00a1, 2448 ql_log(ql_log_info, vha, 0x00a1,
2449 "Firmware loaded successully from flash.\n"); 2449 "Firmware loaded successfully from flash.\n");
2450 return QLA_SUCCESS; 2450 return QLA_SUCCESS;
2451 } else { 2451 } else {
2452 ql_log(ql_log_warn, vha, 0x0108, 2452 ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@ try_blob_fw:
2461 blob = ha->hablob = qla2x00_request_firmware(vha); 2461 blob = ha->hablob = qla2x00_request_firmware(vha);
2462 if (!blob) { 2462 if (!blob) {
2463 ql_log(ql_log_fatal, vha, 0x00a3, 2463 ql_log(ql_log_fatal, vha, 0x00a3,
2464 "Firmware image not preset.\n"); 2464 "Firmware image not present.\n");
2465 goto fw_load_failed; 2465 goto fw_load_failed;
2466 } 2466 }
2467 2467
@@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
2689 if (!optrom) { 2689 if (!optrom) {
2690 ql_log(ql_log_warn, vha, 0xb01b, 2690 ql_log(ql_log_warn, vha, 0xb01b,
2691 "Unable to allocate memory " 2691 "Unable to allocate memory "
2692 "for optron burst write (%x KB).\n", 2692 "for optrom burst write (%x KB).\n",
2693 OPTROM_BURST_SIZE / 1024); 2693 OPTROM_BURST_SIZE / 1024);
2694 } 2694 }
2695 } 2695 }
@@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2960 * changing the state to DEV_READY 2960 * changing the state to DEV_READY
2961 */ 2961 */
2962 ql_log(ql_log_info, vha, 0xb023, 2962 ql_log(ql_log_info, vha, 0xb023,
2963 "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME); 2963 "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
2964 ql_log(ql_log_info, vha, 0xb024, 2964 "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
2965 "DRV_ACTIVE:%d DRV_STATE:%d.\n",
2966 drv_active, drv_state); 2965 drv_active, drv_state);
2967 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2966 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2968 QLA82XX_DEV_READY); 2967 QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3129 if (ql2xmdenable) { 3128 if (ql2xmdenable) {
3130 if (qla82xx_md_collect(vha)) 3129 if (qla82xx_md_collect(vha))
3131 ql_log(ql_log_warn, vha, 0xb02c, 3130 ql_log(ql_log_warn, vha, 0xb02c,
3132 "Not able to collect minidump.\n"); 3131 "Minidump not collected.\n");
3133 } else 3132 } else
3134 ql_log(ql_log_warn, vha, 0xb04f, 3133 ql_log(ql_log_warn, vha, 0xb04f,
3135 "Minidump disabled.\n"); 3134 "Minidump disabled.\n");
@@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
3160 "Firmware version differs " 3159 "Firmware version differs "
3161 "Previous version: %d:%d:%d - " 3160 "Previous version: %d:%d:%d - "
3162 "New version: %d:%d:%d\n", 3161 "New version: %d:%d:%d\n",
3162 fw_major_version, fw_minor_version,
3163 fw_subminor_version,
3163 ha->fw_major_version, 3164 ha->fw_major_version,
3164 ha->fw_minor_version, 3165 ha->fw_minor_version,
3165 ha->fw_subminor_version, 3166 ha->fw_subminor_version);
3166 fw_major_version, fw_minor_version,
3167 fw_subminor_version);
3168 /* Release MiniDump resources */ 3167 /* Release MiniDump resources */
3169 qla82xx_md_free(vha); 3168 qla82xx_md_free(vha);
3170 /* ALlocate MiniDump resources */ 3169 /* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@ exit:
3325 return rval; 3324 return rval;
3326} 3325}
3327 3326
3327static int qla82xx_check_temp(scsi_qla_host_t *vha)
3328{
3329 uint32_t temp, temp_state, temp_val;
3330 struct qla_hw_data *ha = vha->hw;
3331
3332 temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
3333 temp_state = qla82xx_get_temp_state(temp);
3334 temp_val = qla82xx_get_temp_val(temp);
3335
3336 if (temp_state == QLA82XX_TEMP_PANIC) {
3337 ql_log(ql_log_warn, vha, 0x600e,
3338 "Device temperature %d degrees C exceeds "
3339 " maximum allowed. Hardware has been shut down.\n",
3340 temp_val);
3341 return 1;
3342 } else if (temp_state == QLA82XX_TEMP_WARN) {
3343 ql_log(ql_log_warn, vha, 0x600f,
3344 "Device temperature %d degrees C exceeds "
3345 "operating range. Immediate action needed.\n",
3346 temp_val);
3347 }
3348 return 0;
3349}
3350
3328void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) 3351void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
3329{ 3352{
3330 struct qla_hw_data *ha = vha->hw; 3353 struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3347 /* don't poll if reset is going on */ 3370 /* don't poll if reset is going on */
3348 if (!ha->flags.isp82xx_reset_hdlr_active) { 3371 if (!ha->flags.isp82xx_reset_hdlr_active) {
3349 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3372 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3350 if (dev_state == QLA82XX_DEV_NEED_RESET && 3373 if (qla82xx_check_temp(vha)) {
3374 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3375 ha->flags.isp82xx_fw_hung = 1;
3376 qla82xx_clear_pending_mbx(vha);
3377 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
3351 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3378 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3352 ql_log(ql_log_warn, vha, 0x6001, 3379 ql_log(ql_log_warn, vha, 0x6001,
3353 "Adapter reset needed.\n"); 3380 "Adapter reset needed.\n");
3354 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3381 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3355 qla2xxx_wake_dpc(vha);
3356 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3382 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3357 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3383 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3358 ql_log(ql_log_warn, vha, 0x6002, 3384 ql_log(ql_log_warn, vha, 0x6002,
3359 "Quiescent needed.\n"); 3385 "Quiescent needed.\n");
3360 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3386 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3361 qla2xxx_wake_dpc(vha);
3362 } else { 3387 } else {
3363 if (qla82xx_check_fw_alive(vha)) { 3388 if (qla82xx_check_fw_alive(vha)) {
3364 ql_dbg(ql_dbg_timer, vha, 0x6011, 3389 ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3398 set_bit(ISP_ABORT_NEEDED, 3423 set_bit(ISP_ABORT_NEEDED,
3399 &vha->dpc_flags); 3424 &vha->dpc_flags);
3400 } 3425 }
3401 qla2xxx_wake_dpc(vha);
3402 ha->flags.isp82xx_fw_hung = 1; 3426 ha->flags.isp82xx_fw_hung = 1;
3403 ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); 3427 ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
3404 qla82xx_clear_pending_mbx(vha); 3428 qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
4113 goto md_failed; 4137 goto md_failed;
4114 } 4138 }
4115 4139
4140 if (ha->flags.isp82xx_no_md_cap) {
4141 ql_log(ql_log_warn, vha, 0xb054,
4142 "Forced reset from application, "
4143 "ignore minidump capture\n");
4144 ha->flags.isp82xx_no_md_cap = 0;
4145 goto md_failed;
4146 }
4147
4116 if (qla82xx_validate_template_chksum(vha)) { 4148 if (qla82xx_validate_template_chksum(vha)) {
4117 ql_log(ql_log_info, vha, 0xb039, 4149 ql_log(ql_log_info, vha, 0xb039,
4118 "Template checksum validation error\n"); 4150 "Template checksum validation error\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 4ac50e274661..6eb210e3cc63 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) 27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
29#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
29#define QLA82XX_DMA_SHIFT_VALUE 0x55555555 30#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
30 31
31#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 32#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@@ -561,7 +562,6 @@
561#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158)) 562#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
562#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg)) 563#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
563 564
564#define PCIE_CHICKEN3 (0x120c8)
565#define PCIE_SETUP_FUNCTION (0x12040) 565#define PCIE_SETUP_FUNCTION (0x12040)
566#define PCIE_SETUP_FUNCTION2 (0x12048) 566#define PCIE_SETUP_FUNCTION2 (0x12048)
567 567
@@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
1178#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 1178#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
1179#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 1179#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
1180 1180
1181#define qla82xx_get_temp_val(x) ((x) >> 16)
1182#define qla82xx_get_temp_state(x) ((x) & 0xffff)
1183#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
1184
1185/*
1186 * Temperature control.
1187 */
1188enum {
1189 QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
1190 QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
1191 QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
1192};
1181#endif 1193#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c9c56a8427f3..6d1d873a20e2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,12 +13,13 @@
13#include <linux/mutex.h> 13#include <linux/mutex.h>
14#include <linux/kobject.h> 14#include <linux/kobject.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16
17#include <scsi/scsi_tcq.h> 16#include <scsi/scsi_tcq.h>
18#include <scsi/scsicam.h> 17#include <scsi/scsicam.h>
19#include <scsi/scsi_transport.h> 18#include <scsi/scsi_transport.h>
20#include <scsi/scsi_transport_fc.h> 19#include <scsi/scsi_transport_fc.h>
21 20
21#include "qla_target.h"
22
22/* 23/*
23 * Driver version 24 * Driver version
24 */ 25 */
@@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
40 */ 41 */
41int ql_errlev = ql_log_all; 42int ql_errlev = ql_log_all;
42 43
44int ql2xenableclass2;
45module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
46MODULE_PARM_DESC(ql2xenableclass2,
47 "Specify if Class 2 operations are supported from the very "
48 "beginning. Default is 0 - class 2 not supported.");
49
43int ql2xlogintimeout = 20; 50int ql2xlogintimeout = 20;
44module_param(ql2xlogintimeout, int, S_IRUGO); 51module_param(ql2xlogintimeout, int, S_IRUGO);
45MODULE_PARM_DESC(ql2xlogintimeout, 52MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
255 262
256 .max_sectors = 0xFFFF, 263 .max_sectors = 0xFFFF,
257 .shost_attrs = qla2x00_host_attrs, 264 .shost_attrs = qla2x00_host_attrs,
265
266 .supported_mode = MODE_INITIATOR,
258}; 267};
259 268
260static struct scsi_transport_template *qla2xxx_transport_template = NULL; 269static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
306static void qla2x00_mem_free(struct qla_hw_data *); 315static void qla2x00_mem_free(struct qla_hw_data *);
307 316
308/* -------------------------------------------------------------------------- */ 317/* -------------------------------------------------------------------------- */
309static int qla2x00_alloc_queues(struct qla_hw_data *ha) 318static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
319 struct rsp_que *rsp)
310{ 320{
311 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 321 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
312 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 322 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
324 "Unable to allocate memory for response queue ptrs.\n"); 334 "Unable to allocate memory for response queue ptrs.\n");
325 goto fail_rsp_map; 335 goto fail_rsp_map;
326 } 336 }
337 /*
338 * Make sure we record at least the request and response queue zero in
339 * case we need to free them if part of the probe fails.
340 */
341 ha->rsp_q_map[0] = rsp;
342 ha->req_q_map[0] = req;
327 set_bit(0, ha->rsp_qid_map); 343 set_bit(0, ha->rsp_qid_map);
328 set_bit(0, ha->req_qid_map); 344 set_bit(0, ha->req_qid_map);
329 return 1; 345 return 1;
@@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
642 658
643 if (ha->flags.eeh_busy) { 659 if (ha->flags.eeh_busy) {
644 if (ha->flags.pci_channel_io_perm_failure) { 660 if (ha->flags.pci_channel_io_perm_failure) {
645 ql_dbg(ql_dbg_io, vha, 0x3001, 661 ql_dbg(ql_dbg_aer, vha, 0x9010,
646 "PCI Channel IO permanent failure, exiting " 662 "PCI Channel IO permanent failure, exiting "
647 "cmd=%p.\n", cmd); 663 "cmd=%p.\n", cmd);
648 cmd->result = DID_NO_CONNECT << 16; 664 cmd->result = DID_NO_CONNECT << 16;
649 } else { 665 } else {
650 ql_dbg(ql_dbg_io, vha, 0x3002, 666 ql_dbg(ql_dbg_aer, vha, 0x9011,
651 "EEH_Busy, Requeuing the cmd=%p.\n", cmd); 667 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
652 cmd->result = DID_REQUEUE << 16; 668 cmd->result = DID_REQUEUE << 16;
653 } 669 }
@@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
657 rval = fc_remote_port_chkready(rport); 673 rval = fc_remote_port_chkready(rport);
658 if (rval) { 674 if (rval) {
659 cmd->result = rval; 675 cmd->result = rval;
660 ql_dbg(ql_dbg_io, vha, 0x3003, 676 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
661 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 677 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
662 cmd, rval); 678 cmd, rval);
663 goto qc24_fail_command; 679 goto qc24_fail_command;
@@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1136 ret = FAILED; 1152 ret = FAILED;
1137 1153
1138 ql_log(ql_log_info, vha, 0x8012, 1154 ql_log(ql_log_info, vha, 0x8012,
1139 "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun); 1155 "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
1140 1156
1141 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1157 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1142 ql_log(ql_log_fatal, vha, 0x8013, 1158 ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2180 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2196 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2181 "Memory allocated for ha=%p.\n", ha); 2197 "Memory allocated for ha=%p.\n", ha);
2182 ha->pdev = pdev; 2198 ha->pdev = pdev;
2199 ha->tgt.enable_class_2 = ql2xenableclass2;
2183 2200
2184 /* Clear our data area */ 2201 /* Clear our data area */
2185 ha->bars = bars; 2202 ha->bars = bars;
@@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2243 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2260 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2244 req_length = REQUEST_ENTRY_CNT_24XX; 2261 req_length = REQUEST_ENTRY_CNT_24XX;
2245 rsp_length = RESPONSE_ENTRY_CNT_2300; 2262 rsp_length = RESPONSE_ENTRY_CNT_2300;
2263 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2246 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2264 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2247 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2265 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2248 ha->gid_list_info_size = 8; 2266 ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2258 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2276 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2259 req_length = REQUEST_ENTRY_CNT_24XX; 2277 req_length = REQUEST_ENTRY_CNT_24XX;
2260 rsp_length = RESPONSE_ENTRY_CNT_2300; 2278 rsp_length = RESPONSE_ENTRY_CNT_2300;
2279 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2261 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2280 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2262 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2281 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2263 ha->gid_list_info_size = 8; 2282 ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2417 host->max_cmd_len, host->max_channel, host->max_lun, 2436 host->max_cmd_len, host->max_channel, host->max_lun,
2418 host->transportt, sht->vendor_id); 2437 host->transportt, sht->vendor_id);
2419 2438
2439que_init:
2440 /* Alloc arrays of request and response ring ptrs */
2441 if (!qla2x00_alloc_queues(ha, req, rsp)) {
2442 ql_log(ql_log_fatal, base_vha, 0x003d,
2443 "Failed to allocate memory for queue pointers..."
2444 "aborting.\n");
2445 goto probe_init_failed;
2446 }
2447
2448 qlt_probe_one_stage1(base_vha, ha);
2449
2420 /* Set up the irqs */ 2450 /* Set up the irqs */
2421 ret = qla2x00_request_irqs(ha, rsp); 2451 ret = qla2x00_request_irqs(ha, rsp);
2422 if (ret) 2452 if (ret)
@@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2424 2454
2425 pci_save_state(pdev); 2455 pci_save_state(pdev);
2426 2456
2427 /* Alloc arrays of request and response ring ptrs */ 2457 /* Assign back pointers */
2428que_init:
2429 if (!qla2x00_alloc_queues(ha)) {
2430 ql_log(ql_log_fatal, base_vha, 0x003d,
2431 "Failed to allocate memory for queue pointers.. aborting.\n");
2432 goto probe_init_failed;
2433 }
2434
2435 ha->rsp_q_map[0] = rsp;
2436 ha->req_q_map[0] = req;
2437 rsp->req = req; 2458 rsp->req = req;
2438 req->rsp = rsp; 2459 req->rsp = rsp;
2439 set_bit(0, ha->req_qid_map); 2460
2440 set_bit(0, ha->rsp_qid_map);
2441 /* FWI2-capable only. */ 2461 /* FWI2-capable only. */
2442 req->req_q_in = &ha->iobase->isp24.req_q_in; 2462 req->req_q_in = &ha->iobase->isp24.req_q_in;
2443 req->req_q_out = &ha->iobase->isp24.req_q_out; 2463 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@ que_init:
2514 ql_dbg(ql_dbg_init, base_vha, 0x00ee, 2534 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
2515 "DPC thread started successfully.\n"); 2535 "DPC thread started successfully.\n");
2516 2536
2537 /*
2538 * If we're not coming up in initiator mode, we might sit for
2539 * a while without waking up the dpc thread, which leads to a
2540 * stuck process warning. So just kick the dpc once here and
2541 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
2542 */
2543 qla2xxx_wake_dpc(base_vha);
2544
2517skip_dpc: 2545skip_dpc:
2518 list_add_tail(&base_vha->list, &ha->vp_list); 2546 list_add_tail(&base_vha->list, &ha->vp_list);
2519 base_vha->host->irq = ha->pdev->irq; 2547 base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@ skip_dpc:
2559 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 2587 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2560 "Init done and hba is online.\n"); 2588 "Init done and hba is online.\n");
2561 2589
2562 scsi_scan_host(host); 2590 if (qla_ini_mode_enabled(base_vha))
2591 scsi_scan_host(host);
2592 else
2593 ql_dbg(ql_dbg_init, base_vha, 0x0122,
2594 "skipping scsi_scan_host() for non-initiator port\n");
2563 2595
2564 qla2x00_alloc_sysfs_attr(base_vha); 2596 qla2x00_alloc_sysfs_attr(base_vha);
2565 2597
@@ -2577,11 +2609,17 @@ skip_dpc:
2577 base_vha->host_no, 2609 base_vha->host_no,
2578 ha->isp_ops->fw_version_str(base_vha, fw_str)); 2610 ha->isp_ops->fw_version_str(base_vha, fw_str));
2579 2611
2612 qlt_add_target(ha, base_vha);
2613
2580 return 0; 2614 return 0;
2581 2615
2582probe_init_failed: 2616probe_init_failed:
2583 qla2x00_free_req_que(ha, req); 2617 qla2x00_free_req_que(ha, req);
2618 ha->req_q_map[0] = NULL;
2619 clear_bit(0, ha->req_qid_map);
2584 qla2x00_free_rsp_que(ha, rsp); 2620 qla2x00_free_rsp_que(ha, rsp);
2621 ha->rsp_q_map[0] = NULL;
2622 clear_bit(0, ha->rsp_qid_map);
2585 ha->max_req_queues = ha->max_rsp_queues = 0; 2623 ha->max_req_queues = ha->max_rsp_queues = 0;
2586 2624
2587probe_failed: 2625probe_failed:
@@ -2621,6 +2659,22 @@ probe_out:
2621} 2659}
2622 2660
2623static void 2661static void
2662qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
2663{
2664 struct qla_hw_data *ha = vha->hw;
2665 struct task_struct *t = ha->dpc_thread;
2666
2667 if (ha->dpc_thread == NULL)
2668 return;
2669 /*
2670 * qla2xxx_wake_dpc checks for ->dpc_thread
2671 * so we need to zero it out.
2672 */
2673 ha->dpc_thread = NULL;
2674 kthread_stop(t);
2675}
2676
2677static void
2624qla2x00_shutdown(struct pci_dev *pdev) 2678qla2x00_shutdown(struct pci_dev *pdev)
2625{ 2679{
2626 scsi_qla_host_t *vha; 2680 scsi_qla_host_t *vha;
@@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
2663 struct qla_hw_data *ha; 2717 struct qla_hw_data *ha;
2664 unsigned long flags; 2718 unsigned long flags;
2665 2719
2720 /*
2721 * If the PCI device is disabled that means that probe failed and any
2722 * resources should be have cleaned up on probe exit.
2723 */
2724 if (!atomic_read(&pdev->enable_cnt))
2725 return;
2726
2666 base_vha = pci_get_drvdata(pdev); 2727 base_vha = pci_get_drvdata(pdev);
2667 ha = base_vha->hw; 2728 ha = base_vha->hw;
2668 2729
2730 ha->flags.host_shutting_down = 1;
2731
2669 mutex_lock(&ha->vport_lock); 2732 mutex_lock(&ha->vport_lock);
2670 while (ha->cur_vport_count) { 2733 while (ha->cur_vport_count) {
2671 struct Scsi_Host *scsi_host; 2734 struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
2719 ha->dpc_thread = NULL; 2782 ha->dpc_thread = NULL;
2720 kthread_stop(t); 2783 kthread_stop(t);
2721 } 2784 }
2785 qlt_remove_target(ha, base_vha);
2722 2786
2723 qla2x00_free_sysfs_attr(base_vha); 2787 qla2x00_free_sysfs_attr(base_vha);
2724 2788
@@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2770 if (vha->timer_active) 2834 if (vha->timer_active)
2771 qla2x00_stop_timer(vha); 2835 qla2x00_stop_timer(vha);
2772 2836
2773 /* Kill the kernel thread for this host */ 2837 qla2x00_stop_dpc_thread(vha);
2774 if (ha->dpc_thread) {
2775 struct task_struct *t = ha->dpc_thread;
2776
2777 /*
2778 * qla2xxx_wake_dpc checks for ->dpc_thread
2779 * so we need to zero it out.
2780 */
2781 ha->dpc_thread = NULL;
2782 kthread_stop(t);
2783 }
2784 2838
2785 qla25xx_delete_queues(vha); 2839 qla25xx_delete_queues(vha);
2786 2840
@@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2842 spin_unlock_irqrestore(vha->host->host_lock, flags); 2896 spin_unlock_irqrestore(vha->host->host_lock, flags);
2843 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 2897 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2844 qla2xxx_wake_dpc(base_vha); 2898 qla2xxx_wake_dpc(base_vha);
2845 } else 2899 } else {
2846 fc_remote_port_delete(rport); 2900 fc_remote_port_delete(rport);
2901 qlt_fc_port_deleted(vha, fcport);
2902 }
2847} 2903}
2848 2904
2849/* 2905/*
@@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2859 int do_login, int defer) 2915 int do_login, int defer)
2860{ 2916{
2861 if (atomic_read(&fcport->state) == FCS_ONLINE && 2917 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2862 vha->vp_idx == fcport->vp_idx) { 2918 vha->vp_idx == fcport->vha->vp_idx) {
2863 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2919 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2864 qla2x00_schedule_rport_del(vha, fcport, defer); 2920 qla2x00_schedule_rport_del(vha, fcport, defer);
2865 } 2921 }
@@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2908 fc_port_t *fcport; 2964 fc_port_t *fcport;
2909 2965
2910 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2966 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2911 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx) 2967 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
2912 continue; 2968 continue;
2913 2969
2914 /* 2970 /*
@@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2921 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2977 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2922 if (defer) 2978 if (defer)
2923 qla2x00_schedule_rport_del(vha, fcport, defer); 2979 qla2x00_schedule_rport_del(vha, fcport, defer);
2924 else if (vha->vp_idx == fcport->vp_idx) 2980 else if (vha->vp_idx == fcport->vha->vp_idx)
2925 qla2x00_schedule_rport_del(vha, fcport, defer); 2981 qla2x00_schedule_rport_del(vha, fcport, defer);
2926 } 2982 }
2927 } 2983 }
@@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2946 if (!ha->init_cb) 3002 if (!ha->init_cb)
2947 goto fail; 3003 goto fail;
2948 3004
3005 if (qlt_mem_alloc(ha) < 0)
3006 goto fail_free_init_cb;
3007
2949 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 3008 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
2950 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 3009 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
2951 if (!ha->gid_list) 3010 if (!ha->gid_list)
2952 goto fail_free_init_cb; 3011 goto fail_free_tgt_mem;
2953 3012
2954 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 3013 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2955 if (!ha->srb_mempool) 3014 if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@ fail_free_gid_list:
3167 ha->gid_list_dma); 3226 ha->gid_list_dma);
3168 ha->gid_list = NULL; 3227 ha->gid_list = NULL;
3169 ha->gid_list_dma = 0; 3228 ha->gid_list_dma = 0;
3229fail_free_tgt_mem:
3230 qlt_mem_free(ha);
3170fail_free_init_cb: 3231fail_free_init_cb:
3171 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 3232 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
3172 ha->init_cb_dma); 3233 ha->init_cb_dma);
@@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3282 if (ha->ctx_mempool) 3343 if (ha->ctx_mempool)
3283 mempool_destroy(ha->ctx_mempool); 3344 mempool_destroy(ha->ctx_mempool);
3284 3345
3346 qlt_mem_free(ha);
3347
3285 if (ha->init_cb) 3348 if (ha->init_cb)
3286 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 3349 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
3287 ha->init_cb, ha->init_cb_dma); 3350 ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3311 3374
3312 ha->gid_list = NULL; 3375 ha->gid_list = NULL;
3313 ha->gid_list_dma = 0; 3376 ha->gid_list_dma = 0;
3377
3378 ha->tgt.atio_ring = NULL;
3379 ha->tgt.atio_dma = 0;
3380 ha->tgt.tgt_vp_map = NULL;
3314} 3381}
3315 3382
3316struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 3383struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
3671 3738
3672 ha->dpc_active = 1; 3739 ha->dpc_active = 1;
3673 3740
3674 ql_dbg(ql_dbg_dpc, base_vha, 0x4001, 3741 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
3675 "DPC handler waking up.\n"); 3742 "DPC handler waking up, dpc_flags=0x%lx.\n",
3676 ql_dbg(ql_dbg_dpc, base_vha, 0x4002, 3743 base_vha->dpc_flags);
3677 "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
3678 3744
3679 qla2x00_do_work(base_vha); 3745 qla2x00_do_work(base_vha);
3680 3746
@@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
3740 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3806 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3741 } 3807 }
3742 3808
3809 if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
3810 int ret;
3811 ret = qla2x00_send_change_request(base_vha, 0x3, 0);
3812 if (ret != QLA_SUCCESS)
3813 ql_log(ql_log_warn, base_vha, 0x121,
3814 "Failed to enable receiving of RSCN "
3815 "requests: 0x%x.\n", ret);
3816 clear_bit(SCR_PENDING, &base_vha->dpc_flags);
3817 }
3818
3743 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 3819 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3744 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 3820 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3745 "Quiescence mode scheduled.\n"); 3821 "Quiescence mode scheduled.\n");
@@ -4457,6 +4533,21 @@ qla2x00_module_init(void)
4457 return -ENOMEM; 4533 return -ENOMEM;
4458 } 4534 }
4459 4535
4536 /* Initialize target kmem_cache and mem_pools */
4537 ret = qlt_init();
4538 if (ret < 0) {
4539 kmem_cache_destroy(srb_cachep);
4540 return ret;
4541 } else if (ret > 0) {
4542 /*
4543 * If initiator mode is explictly disabled by qlt_init(),
4544 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
4545 * performing scsi_scan_target() during LOOP UP event.
4546 */
4547 qla2xxx_transport_functions.disable_target_scan = 1;
4548 qla2xxx_transport_vport_functions.disable_target_scan = 1;
4549 }
4550
4460 /* Derive version string. */ 4551 /* Derive version string. */
4461 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 4552 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
4462 if (ql2xextended_error_logging) 4553 if (ql2xextended_error_logging)
@@ -4468,6 +4559,7 @@ qla2x00_module_init(void)
4468 kmem_cache_destroy(srb_cachep); 4559 kmem_cache_destroy(srb_cachep);
4469 ql_log(ql_log_fatal, NULL, 0x0002, 4560 ql_log(ql_log_fatal, NULL, 0x0002,
4470 "fc_attach_transport failed...Failing load!.\n"); 4561 "fc_attach_transport failed...Failing load!.\n");
4562 qlt_exit();
4471 return -ENODEV; 4563 return -ENODEV;
4472 } 4564 }
4473 4565
@@ -4481,6 +4573,7 @@ qla2x00_module_init(void)
4481 fc_attach_transport(&qla2xxx_transport_vport_functions); 4573 fc_attach_transport(&qla2xxx_transport_vport_functions);
4482 if (!qla2xxx_transport_vport_template) { 4574 if (!qla2xxx_transport_vport_template) {
4483 kmem_cache_destroy(srb_cachep); 4575 kmem_cache_destroy(srb_cachep);
4576 qlt_exit();
4484 fc_release_transport(qla2xxx_transport_template); 4577 fc_release_transport(qla2xxx_transport_template);
4485 ql_log(ql_log_fatal, NULL, 0x0004, 4578 ql_log(ql_log_fatal, NULL, 0x0004,
4486 "fc_attach_transport vport failed...Failing load!.\n"); 4579 "fc_attach_transport vport failed...Failing load!.\n");
@@ -4492,6 +4585,7 @@ qla2x00_module_init(void)
4492 ret = pci_register_driver(&qla2xxx_pci_driver); 4585 ret = pci_register_driver(&qla2xxx_pci_driver);
4493 if (ret) { 4586 if (ret) {
4494 kmem_cache_destroy(srb_cachep); 4587 kmem_cache_destroy(srb_cachep);
4588 qlt_exit();
4495 fc_release_transport(qla2xxx_transport_template); 4589 fc_release_transport(qla2xxx_transport_template);
4496 fc_release_transport(qla2xxx_transport_vport_template); 4590 fc_release_transport(qla2xxx_transport_vport_template);
4497 ql_log(ql_log_fatal, NULL, 0x0006, 4591 ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4511,6 +4605,7 @@ qla2x00_module_exit(void)
4511 pci_unregister_driver(&qla2xxx_pci_driver); 4605 pci_unregister_driver(&qla2xxx_pci_driver);
4512 qla2x00_release_firmware(); 4606 qla2x00_release_firmware();
4513 kmem_cache_destroy(srb_cachep); 4607 kmem_cache_destroy(srb_cachep);
4608 qlt_exit();
4514 if (ctx_cachep) 4609 if (ctx_cachep)
4515 kmem_cache_destroy(ctx_cachep); 4610 kmem_cache_destroy(ctx_cachep);
4516 fc_release_transport(qla2xxx_transport_template); 4611 fc_release_transport(qla2xxx_transport_template);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 000000000000..6986552b47e6
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,4972 @@
1/*
2 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
3 *
4 * based on qla2x00t.c code:
5 *
6 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
7 * Copyright (C) 2004 - 2005 Leonid Stoljar
8 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
9 * Copyright (C) 2006 - 2010 ID7 Ltd.
10 *
11 * Forward port and refactoring to modern qla2xxx and target/configfs
12 *
13 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation, version 2
18 * of the License.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/types.h>
29#include <linux/blkdev.h>
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/delay.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <asm/unaligned.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi_tcq.h>
39#include <target/target_core_base.h>
40#include <target/target_core_fabric.h>
41
42#include "qla_def.h"
43#include "qla_target.h"
44
45static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
46module_param(qlini_mode, charp, S_IRUGO);
47MODULE_PARM_DESC(qlini_mode,
48 "Determines when initiator mode will be enabled. Possible values: "
49 "\"exclusive\" - initiator mode will be enabled on load, "
50 "disabled on enabling target mode and then on disabling target mode "
51 "enabled back; "
52 "\"disabled\" - initiator mode will never be enabled; "
53 "\"enabled\" (default) - initiator mode will always stay enabled.");
54
55static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
56
57/*
58 * From scsi/fc/fc_fcp.h
59 */
60enum fcp_resp_rsp_codes {
61 FCP_TMF_CMPL = 0,
62 FCP_DATA_LEN_INVALID = 1,
63 FCP_CMND_FIELDS_INVALID = 2,
64 FCP_DATA_PARAM_MISMATCH = 3,
65 FCP_TMF_REJECTED = 4,
66 FCP_TMF_FAILED = 5,
67 FCP_TMF_INVALID_LUN = 9,
68};
69
70/*
71 * fc_pri_ta from scsi/fc/fc_fcp.h
72 */
73#define FCP_PTA_SIMPLE 0 /* simple task attribute */
74#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
75#define FCP_PTA_ORDERED 2 /* ordered task attribute */
76#define FCP_PTA_ACA 4 /* auto. contigent allegiance */
77#define FCP_PTA_MASK 7 /* mask for task attribute field */
78#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
79#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
80
81/*
82 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
83 * must be called under HW lock and could unlock/lock it inside.
84 * It isn't an issue, since in the current implementation on the time when
85 * those functions are called:
86 *
87 * - Either context is IRQ and only IRQ handler can modify HW data,
88 * including rings related fields,
89 *
90 * - Or access to target mode variables from struct qla_tgt doesn't
91 * cross those functions boundaries, except tgt_stop, which
92 * additionally protected by irq_cmd_count.
93 */
94/* Predefs for callbacks handed to qla2xxx LLD */
95static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
96 struct atio_from_isp *pkt);
97static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
98static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
99 int fn, void *iocb, int flags);
100static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
101 *cmd, struct atio_from_isp *atio, int ha_locked);
102static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
103 struct qla_tgt_srr_imm *imm, int ha_lock);
104/*
105 * Global Variables
106 */
107static struct kmem_cache *qla_tgt_cmd_cachep;
108static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
109static mempool_t *qla_tgt_mgmt_cmd_mempool;
110static struct workqueue_struct *qla_tgt_wq;
111static DEFINE_MUTEX(qla_tgt_mutex);
112static LIST_HEAD(qla_tgt_glist);
113
114/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
115static struct qla_tgt_sess *qlt_find_sess_by_port_name(
116 struct qla_tgt *tgt,
117 const uint8_t *port_name)
118{
119 struct qla_tgt_sess *sess;
120
121 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
122 if (!memcmp(sess->port_name, port_name, WWN_SIZE))
123 return sess;
124 }
125
126 return NULL;
127}
128
129/* Might release hw lock, then reaquire!! */
130static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
131{
132 /* Send marker if required */
133 if (unlikely(vha->marker_needed != 0)) {
134 int rc = qla2x00_issue_marker(vha, vha_locked);
135 if (rc != QLA_SUCCESS) {
136 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
137 "qla_target(%d): issue_marker() failed\n",
138 vha->vp_idx);
139 }
140 return rc;
141 }
142 return QLA_SUCCESS;
143}
144
145static inline
146struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
147 uint8_t *d_id)
148{
149 struct qla_hw_data *ha = vha->hw;
150 uint8_t vp_idx;
151
152 if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
153 return NULL;
154
155 if (vha->d_id.b.al_pa == d_id[2])
156 return vha;
157
158 BUG_ON(ha->tgt.tgt_vp_map == NULL);
159 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
160 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
161 return ha->tgt.tgt_vp_map[vp_idx].vha;
162
163 return NULL;
164}
165
166static inline
167struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
168 uint16_t vp_idx)
169{
170 struct qla_hw_data *ha = vha->hw;
171
172 if (vha->vp_idx == vp_idx)
173 return vha;
174
175 BUG_ON(ha->tgt.tgt_vp_map == NULL);
176 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
177 return ha->tgt.tgt_vp_map[vp_idx].vha;
178
179 return NULL;
180}
181
/*
 * Route an incoming ATIO packet to the scsi_qla_host (physical port or
 * NPIV virtual port) it is addressed to, then hand it to
 * qlt_24xx_atio_pkt().  Packets whose destination cannot be resolved are
 * dropped after logging a debug message.
 */
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		/* New command: route by the 24-bit destination FC address. */
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		/*
		 * Immediate notify: route by vp_index only when the firmware
		 * addressed a specific vport (0xFF vp_index / 0xFFFF nport
		 * handle mean "not port-specific" and stay on this host).
		 */
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}
235
236void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
237{
238 switch (pkt->entry_type) {
239 case CTIO_TYPE7:
240 {
241 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
242 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
243 entry->vp_index);
244 if (unlikely(!host)) {
245 ql_dbg(ql_dbg_tgt, vha, 0xe041,
246 "qla_target(%d): Response pkt (CTIO_TYPE7) "
247 "received, with unknown vp_index %d\n",
248 vha->vp_idx, entry->vp_index);
249 break;
250 }
251 qlt_response_pkt(host, pkt);
252 break;
253 }
254
255 case IMMED_NOTIFY_TYPE:
256 {
257 struct scsi_qla_host *host = vha;
258 struct imm_ntfy_from_isp *entry =
259 (struct imm_ntfy_from_isp *)pkt;
260
261 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
262 if (unlikely(!host)) {
263 ql_dbg(ql_dbg_tgt, vha, 0xe042,
264 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
265 "received, with unknown vp_index %d\n",
266 vha->vp_idx, entry->u.isp24.vp_index);
267 break;
268 }
269 qlt_response_pkt(host, pkt);
270 break;
271 }
272
273 case NOTIFY_ACK_TYPE:
274 {
275 struct scsi_qla_host *host = vha;
276 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
277
278 if (0xFF != entry->u.isp24.vp_index) {
279 host = qlt_find_host_by_vp_idx(vha,
280 entry->u.isp24.vp_index);
281 if (unlikely(!host)) {
282 ql_dbg(ql_dbg_tgt, vha, 0xe043,
283 "qla_target(%d): Response "
284 "pkt (NOTIFY_ACK_TYPE) "
285 "received, with unknown "
286 "vp_index %d\n", vha->vp_idx,
287 entry->u.isp24.vp_index);
288 break;
289 }
290 }
291 qlt_response_pkt(host, pkt);
292 break;
293 }
294
295 case ABTS_RECV_24XX:
296 {
297 struct abts_recv_from_24xx *entry =
298 (struct abts_recv_from_24xx *)pkt;
299 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
300 entry->vp_index);
301 if (unlikely(!host)) {
302 ql_dbg(ql_dbg_tgt, vha, 0xe044,
303 "qla_target(%d): Response pkt "
304 "(ABTS_RECV_24XX) received, with unknown "
305 "vp_index %d\n", vha->vp_idx, entry->vp_index);
306 break;
307 }
308 qlt_response_pkt(host, pkt);
309 break;
310 }
311
312 case ABTS_RESP_24XX:
313 {
314 struct abts_resp_to_24xx *entry =
315 (struct abts_resp_to_24xx *)pkt;
316 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
317 entry->vp_index);
318 if (unlikely(!host)) {
319 ql_dbg(ql_dbg_tgt, vha, 0xe045,
320 "qla_target(%d): Response pkt "
321 "(ABTS_RECV_24XX) received, with unknown "
322 "vp_index %d\n", vha->vp_idx, entry->vp_index);
323 break;
324 }
325 qlt_response_pkt(host, pkt);
326 break;
327 }
328
329 default:
330 qlt_response_pkt(vha, pkt);
331 break;
332 }
333
334}
335
/*
 * Work-queue handler that finishes session teardown started by
 * qlt_unreg_sess(): releases the fabric-module side of the session,
 * frees the qla_tgt_sess and wakes waiters (qlt_stop_phase1()) once
 * the target's session count reaches zero.
 */
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	/*
	 * NOTE(review): sess_count is decremented here without
	 * ha->hardware_lock, while test_tgt_sess_count() reads it under
	 * the lock — presumably serialized by the workqueue; confirm.
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
363
364/* ha->hardware_lock supposed to be held on entry */
365void qlt_unreg_sess(struct qla_tgt_sess *sess)
366{
367 struct scsi_qla_host *vha = sess->vha;
368
369 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
370
371 list_del(&sess->sess_list_entry);
372 if (sess->deleted)
373 list_del(&sess->del_list_entry);
374
375 INIT_WORK(&sess->free_work, qlt_free_session_done);
376 schedule_work(&sess->free_work);
377}
378EXPORT_SYMBOL(qlt_unreg_sess);
379
/* ha->hardware_lock supposed to be held on entry */
/*
 * Handle a reset-class task management request carried in @iocb: look up
 * the originating session by loop ID and forward the request via
 * qlt_issue_task_mgmt().  Returns 0 on success or -ESRCH when no session
 * matches.  Global (loop_id == 0xFFFF) handling is currently disabled.
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	/* The same IOCB is viewed both as an immediate notify and an ATIO. */
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port "
	    "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
	    "mcmd %x, loop_id %d)\n", vha->host_no, sess,
	    sess->port_name[0], sess->port_name[1],
	    sess->port_name[2], sess->port_name[3],
	    sess->port_name[4], sess->port_name[5],
	    sess->port_name[6], sess->port_name[7],
	    mcmd, loop_id);

	/* NOTE(review): lun is taken raw from the ATIO view of the IOCB;
	 * presumably already in the layout scsilun_to_int() expects —
	 * confirm against the firmware spec. */
	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
448
449/* ha->hardware_lock supposed to be held on entry */
450static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
451 bool immediate)
452{
453 struct qla_tgt *tgt = sess->tgt;
454 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
455
456 if (sess->deleted)
457 return;
458
459 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
460 "Scheduling sess %p for deletion\n", sess);
461 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
462 sess->deleted = 1;
463
464 if (immediate)
465 dev_loss_tmo = 0;
466
467 sess->expires = jiffies + dev_loss_tmo * HZ;
468
469 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
470 "qla_target(%d): session for port %02x:%02x:%02x:"
471 "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
472 "deletion in %u secs (expires: %lu) immed: %d\n",
473 sess->vha->vp_idx,
474 sess->port_name[0], sess->port_name[1],
475 sess->port_name[2], sess->port_name[3],
476 sess->port_name[4], sess->port_name[5],
477 sess->port_name[6], sess->port_name[7],
478 sess->loop_id, dev_loss_tmo, sess->expires, immediate);
479
480 if (immediate)
481 schedule_delayed_work(&tgt->sess_del_work, 0);
482 else
483 schedule_delayed_work(&tgt->sess_del_work,
484 jiffies - sess->expires);
485}
486
487/* ha->hardware_lock supposed to be held on entry */
488static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
489{
490 struct qla_tgt_sess *sess;
491
492 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
493 qlt_schedule_sess_for_deletion(sess, true);
494
495 /* At this point tgt could be already dead */
496}
497
498static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
499 uint16_t *loop_id)
500{
501 struct qla_hw_data *ha = vha->hw;
502 dma_addr_t gid_list_dma;
503 struct gid_list_info *gid_list;
504 char *id_iter;
505 int res, rc, i;
506 uint16_t entries;
507
508 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
509 &gid_list_dma, GFP_KERNEL);
510 if (!gid_list) {
511 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
512 "qla_target(%d): DMA Alloc failed of %u\n",
513 vha->vp_idx, qla2x00_gid_list_size(ha));
514 return -ENOMEM;
515 }
516
517 /* Get list of logged in devices */
518 rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
519 if (rc != QLA_SUCCESS) {
520 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
521 "qla_target(%d): get_id_list() failed: %x\n",
522 vha->vp_idx, rc);
523 res = -1;
524 goto out_free_id_list;
525 }
526
527 id_iter = (char *)gid_list;
528 res = -1;
529 for (i = 0; i < entries; i++) {
530 struct gid_list_info *gid = (struct gid_list_info *)id_iter;
531 if ((gid->al_pa == s_id[2]) &&
532 (gid->area == s_id[1]) &&
533 (gid->domain == s_id[0])) {
534 *loop_id = le16_to_cpu(gid->loop_id);
535 res = 0;
536 break;
537 }
538 id_iter += ha->gid_list_info_size;
539 }
540
541out_free_id_list:
542 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
543 gid_list, gid_list_dma);
544 return res;
545}
546
547static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
548 struct qla_tgt_sess *sess)
549{
550 struct qla_hw_data *ha = vha->hw;
551 struct qla_port_24xx_data *pmap24;
552 bool res, found = false;
553 int rc, i;
554 uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
555 uint16_t entries;
556 void *pmap;
557 int pmap_len;
558 fc_port_t *fcport;
559 int global_resets;
560
561retry:
562 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
563
564 rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
565 if (rc != QLA_SUCCESS) {
566 res = false;
567 goto out;
568 }
569
570 pmap24 = pmap;
571 entries = pmap_len/sizeof(*pmap24);
572
573 for (i = 0; i < entries; ++i) {
574 if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
575 loop_id = le16_to_cpu(pmap24[i].loop_id);
576 found = true;
577 break;
578 }
579 }
580
581 kfree(pmap);
582
583 if (!found) {
584 res = false;
585 goto out;
586 }
587
588 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
589 "qlt_check_fcport_exist(): loop_id %d", loop_id);
590
591 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
592 if (fcport == NULL) {
593 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
594 "qla_target(%d): Allocation of tmp FC port failed",
595 vha->vp_idx);
596 res = false;
597 goto out;
598 }
599
600 fcport->loop_id = loop_id;
601
602 rc = qla2x00_get_port_database(vha, fcport, 0);
603 if (rc != QLA_SUCCESS) {
604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
605 "qla_target(%d): Failed to retrieve fcport "
606 "information -- get_port_database() returned %x "
607 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
608 res = false;
609 goto out_free_fcport;
610 }
611
612 if (global_resets !=
613 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
614 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
615 "qla_target(%d): global reset during session discovery"
616 " (counter was %d, new %d), retrying",
617 vha->vp_idx, global_resets,
618 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
619 goto retry;
620 }
621
622 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
623 "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
624 "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
625 sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
626 fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
627
628 sess->s_id = fcport->d_id;
629 sess->loop_id = fcport->loop_id;
630 sess->conf_compl_supported = !!(fcport->flags &
631 FCF_CONF_COMP_SUPPORTED);
632
633 res = true;
634
635out_free_fcport:
636 kfree(fcport);
637
638out:
639 return res;
640}
641
642/* ha->hardware_lock supposed to be held on entry */
643static void qlt_undelete_sess(struct qla_tgt_sess *sess)
644{
645 BUG_ON(!sess->deleted);
646
647 list_del(&sess->del_list_entry);
648 sess->deleted = 0;
649}
650
651static void qlt_del_sess_work_fn(struct delayed_work *work)
652{
653 struct qla_tgt *tgt = container_of(work, struct qla_tgt,
654 sess_del_work);
655 struct scsi_qla_host *vha = tgt->vha;
656 struct qla_hw_data *ha = vha->hw;
657 struct qla_tgt_sess *sess;
658 unsigned long flags;
659
660 spin_lock_irqsave(&ha->hardware_lock, flags);
661 while (!list_empty(&tgt->del_sess_list)) {
662 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
663 del_list_entry);
664 if (time_after_eq(jiffies, sess->expires)) {
665 bool cancel;
666
667 qlt_undelete_sess(sess);
668
669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
670 cancel = qlt_check_fcport_exist(vha, sess);
671
672 if (cancel) {
673 if (sess->deleted) {
674 /*
675 * sess was again deleted while we were
676 * discovering it
677 */
678 spin_lock_irqsave(&ha->hardware_lock,
679 flags);
680 continue;
681 }
682
683 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
684 "qla_target(%d): cancel deletion of "
685 "session for port %02x:%02x:%02x:%02x:%02x:"
686 "%02x:%02x:%02x (loop ID %d), because "
687 " it isn't deleted by firmware",
688 vha->vp_idx, sess->port_name[0],
689 sess->port_name[1], sess->port_name[2],
690 sess->port_name[3], sess->port_name[4],
691 sess->port_name[5], sess->port_name[6],
692 sess->port_name[7], sess->loop_id);
693 } else {
694 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
695 "Timeout: sess %p about to be deleted\n",
696 sess);
697 ha->tgt.tgt_ops->shutdown_sess(sess);
698 ha->tgt.tgt_ops->put_sess(sess);
699 }
700
701 spin_lock_irqsave(&ha->hardware_lock, flags);
702 } else {
703 schedule_delayed_work(&tgt->sess_del_work,
704 jiffies - sess->expires);
705 break;
706 }
707 }
708 spin_unlock_irqrestore(&ha->hardware_lock, flags);
709}
710
711/*
712 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
713 * Caller must put it.
714 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			/* Same WWPN: refresh the existing session instead. */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			/* Extra ref for the caller, per function contract. */
			kref_get(&sess->se_sess->sess_kref);
			sess->s_id = fcport->d_id;
			sess->loop_id = fcport->loop_id;
			sess->conf_compl_supported = !!(fcport->flags &
			    FCF_CONF_COMP_SUPPORTED);
			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, "
		    "all commands from port %02x:%02x:%02x:%02x:"
		    "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
		    fcport->port_name[0], fcport->port_name[1],
		    fcport->port_name[2], fcport->port_name[3],
		    fcport->port_name[4], fcport->port_name[5],
		    fcport->port_name[6], fcport->port_name[7]);

		return NULL;
	}
	sess->tgt = ha->tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, ha->tgt.qla_tgt);

	/* Big-endian S_ID (domain/area/al_pa) for the fabric callback. */
	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explict NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reaquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = !!(fcport->flags &
	    FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	/* Publish the session only after the nexus is fully set up. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
	ha->tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
	    "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
	    " completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name[0],
	    fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
	    fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
	    fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
	    sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
	    "" : "not ");

	return sess;
}
822
823/*
824 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
825 */
826void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
827{
828 struct qla_hw_data *ha = vha->hw;
829 struct qla_tgt *tgt = ha->tgt.qla_tgt;
830 struct qla_tgt_sess *sess;
831 unsigned long flags;
832
833 if (!vha->hw->tgt.tgt_ops)
834 return;
835
836 if (!tgt || (fcport->port_type != FCT_INITIATOR))
837 return;
838
839 spin_lock_irqsave(&ha->hardware_lock, flags);
840 if (tgt->tgt_stop) {
841 spin_unlock_irqrestore(&ha->hardware_lock, flags);
842 return;
843 }
844 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
845 if (!sess) {
846 spin_unlock_irqrestore(&ha->hardware_lock, flags);
847
848 mutex_lock(&ha->tgt.tgt_mutex);
849 sess = qlt_create_sess(vha, fcport, false);
850 mutex_unlock(&ha->tgt.tgt_mutex);
851
852 spin_lock_irqsave(&ha->hardware_lock, flags);
853 } else {
854 kref_get(&sess->se_sess->sess_kref);
855
856 if (sess->deleted) {
857 qlt_undelete_sess(sess);
858
859 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
860 "qla_target(%u): %ssession for port %02x:"
861 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
862 "reappeared\n", vha->vp_idx, sess->local ? "local "
863 : "", sess->port_name[0], sess->port_name[1],
864 sess->port_name[2], sess->port_name[3],
865 sess->port_name[4], sess->port_name[5],
866 sess->port_name[6], sess->port_name[7],
867 sess->loop_id);
868
869 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
870 "Reappeared sess %p\n", sess);
871 }
872 sess->s_id = fcport->d_id;
873 sess->loop_id = fcport->loop_id;
874 sess->conf_compl_supported = !!(fcport->flags &
875 FCF_CONF_COMP_SUPPORTED);
876 }
877
878 if (sess && sess->local) {
879 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
880 "qla_target(%u): local session for "
881 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
882 "(loop ID %d) became global\n", vha->vp_idx,
883 fcport->port_name[0], fcport->port_name[1],
884 fcport->port_name[2], fcport->port_name[3],
885 fcport->port_name[4], fcport->port_name[5],
886 fcport->port_name[6], fcport->port_name[7],
887 sess->loop_id);
888 sess->local = 0;
889 }
890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
891
892 ha->tgt.tgt_ops->put_sess(sess);
893}
894
895void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
896{
897 struct qla_hw_data *ha = vha->hw;
898 struct qla_tgt *tgt = ha->tgt.qla_tgt;
899 struct qla_tgt_sess *sess;
900 unsigned long flags;
901
902 if (!vha->hw->tgt.tgt_ops)
903 return;
904
905 if (!tgt || (fcport->port_type != FCT_INITIATOR))
906 return;
907
908 spin_lock_irqsave(&ha->hardware_lock, flags);
909 if (tgt->tgt_stop) {
910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
911 return;
912 }
913 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
914 if (!sess) {
915 spin_unlock_irqrestore(&ha->hardware_lock, flags);
916 return;
917 }
918
919 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
920
921 sess->local = 1;
922 qlt_schedule_sess_for_deletion(sess, false);
923 spin_unlock_irqrestore(&ha->hardware_lock, flags);
924}
925
926static inline int test_tgt_sess_count(struct qla_tgt *tgt)
927{
928 struct qla_hw_data *ha = tgt->ha;
929 unsigned long flags;
930 int res;
931 /*
932 * We need to protect against race, when tgt is freed before or
933 * inside wake_up()
934 */
935 spin_lock_irqsave(&ha->hardware_lock, flags);
936 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
937 "tgt %p, empty(sess_list)=%d sess_count=%d\n",
938 tgt, list_empty(&tgt->sess_list), tgt->sess_count);
939 res = (tgt->sess_count == 0);
940 spin_unlock_irqrestore(&ha->hardware_lock, flags);
941
942 return res;
943}
944
/* Called by tcm_qla2xxx configfs code */
/*
 * First stage of target shutdown: mark the target stopping, schedule
 * every session for immediate deletion, flush all deletion/session
 * works, then wait for the session count to reach zero.  Finally
 * disables target mode on the HBA ("big hammer") unless the host is
 * already shutting down.  qlt_stop_phase2() completes the shutdown.
 */
void qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	/* Make sure all queued session deletions have run. */
	flush_delayed_work_sync(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	/* Drain sess_works_list; drop the lock while flushing. */
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
}
EXPORT_SYMBOL(qlt_stop_phase1);
999
/* Called by tcm_qla2xxx configfs code */
/*
 * Second stage of target shutdown: busy-wait (cycling the hardware
 * lock so interrupt handlers can make progress) until no IRQ-context
 * commands remain, then move the target from "stopping" to "stopped".
 */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Release the lock between polls so irq_cmd_count can drop. */
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
1033
1034/* Called from qlt_remove_target() -> qla2x00_remove_one() */
1035void qlt_release(struct qla_tgt *tgt)
1036{
1037 struct qla_hw_data *ha = tgt->ha;
1038
1039 if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
1040 qlt_stop_phase2(tgt);
1041
1042 ha->tgt.qla_tgt = NULL;
1043
1044 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
1045 "Release of tgt %p finished\n", tgt);
1046
1047 kfree(tgt);
1048}
1049
1050/* ha->hardware_lock supposed to be held on entry */
1051static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1052 const void *param, unsigned int param_size)
1053{
1054 struct qla_tgt_sess_work_param *prm;
1055 unsigned long flags;
1056
1057 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1058 if (!prm) {
1059 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1060 "qla_target(%d): Unable to create session "
1061 "work, command will be refused", 0);
1062 return -ENOMEM;
1063 }
1064
1065 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1066 "Scheduling work (type %d, prm %p)"
1067 " to find session for param %p (size %d, tgt %p)\n",
1068 type, prm, param, param_size, tgt);
1069
1070 prm->type = type;
1071 memcpy(&prm->tm_iocb, param, param_size);
1072
1073 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1074 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1075 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1076
1077 schedule_work(&tgt->sess_work);
1078
1079 return 0;
1080}
1081
1082/*
1083 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1084 */
/*
 * Build and issue a NOTIFY_ACK IOCB that acknowledges the immediate
 * notify @ntfy, copying back the fields the firmware expects and adding
 * the caller-supplied response/SRR codes.  Silently gives up if a
 * marker or request-queue entry cannot be obtained.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (ha->tgt.qla_tgt != NULL)
		ha->tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	/* Echo the notify's identifying fields back to the firmware. */
	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
		    __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
	/* NOTE(review): add_flags, resp_code and resp_code_valid are not
	 * written into the IOCB here — confirm this is intentional. */

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}
1139
1140/*
1141 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1142 */
/*
 * Build and issue an ABTS response (BA_ACC on FCP_TMF_CMPL, BA_RJT
 * otherwise) for the received ABTS @abts.  @ids_reversed indicates the
 * s_id/d_id in @abts are already from the responder's point of view
 * (true when re-responding to a firmware echo of our own response).
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	/* Copy the 24-bit F_CTL value byte-by-byte into the header. */
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	/* Swap source/destination unless the IDs are already reversed. */
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		/* Basic accept payload. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		/* Basic reject payload. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
		    BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	ha->tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
1216
1217/*
1218 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1219 */
/*
 * The firmware rejected our ABTS response: terminate the exchange with
 * a CTIO7 TERMINATE and then retry the ABTS response.  @entry is the
 * firmware's echo of our response, so its s_id/d_id are reversed.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	/* No completion expected for this handle. */
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	/* d_id of the echoed response is the original initiator. */
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	/* Re-send the accept; IDs in @entry are already reversed. */
	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
1264
1265/* ha->hardware_lock supposed to be held on entry */
/*
 * Turn a received ABTS into a TMR_ABORT_TASK handed to the fabric module.
 *
 * Allocates a qla_tgt_mgmt_cmd with GFP_ATOMIC (we run under
 * hardware_lock), keeps a copy of the ABTS IOCB for building the eventual
 * response, and passes the TMR to tgt_ops->handle_tmr().
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EFAULT when
 * handle_tmr() fails (the mcmd is freed here in that case).
 */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int rc;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	/* Preserve the original ABTS so the response can echo its fields */
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
1301
1302/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
1304 */
/*
 * Handle an ABTS received from an initiator: validate it, find the
 * owning session, and queue an abort TMR via __qlt_24xx_handle_abts();
 * any failure along the way answers the ABTS with FCP_TMF_REJECTED.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	/* Only full-exchange aborts are supported, not Abort Sequence */
	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	/*
	 * fcp_hdr_le carries s_id in reversed (little-endian) byte order;
	 * flip it into the order find_sess_by_s_id() takes.
	 */
	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existant session\n",
		    vha->vp_idx);
		/* Defer to the session work queue; reject if that fails too */
		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
1363
1364/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
1366 */
/*
 * Send the CTIO7 that completes a task-management function, returning
 * @resp_code to the initiator as FCP response information.
 *
 * NOTE(review): the first parameter is a scsi_qla_host despite being
 * named "ha".
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	/* Skip handle: this CTIO needs no per-command completion lookup */
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	/* s_id of the original ATIO arrives byte-reversed */
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	/* 8 bytes of response info, big-endian code in the first word */
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);

	qla2x00_start_iocbs(ha, ha->req);
}
1410
/* Return a task-management command to qla_tgt_mgmt_cmd_mempool. */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
1416
1417/* callback from target fabric module code */
/*
 * Fabric-module callback: transmit the response for a completed TMR.
 * Depending on how the TMR originated this is a notify-ack, an ABTS
 * response, or a task-management CTIO; the mcmd is then handed back
 * through tgt_ops->free_mcmd(), all under hardware_lock.
 */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
1452
1453/* No locks */
/*
 * DMA-map cmd->sg and compute how many request-ring entries the
 * transfer needs, counting continuation IOCBs when the segment count
 * exceeds what a single command IOCB holds.  Returns 0 on success,
 * -1 when pci_map_sg() fails.
 */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If more segments than fit in the command IOCB
	 * (tgt->datasegs_per_cmd), reserve continuation entries for the
	 * remainder, datasegs_per_cont segments apiece.
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
1486
1487static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
1488 struct qla_tgt_cmd *cmd)
1489{
1490 struct qla_hw_data *ha = vha->hw;
1491
1492 BUG_ON(!cmd->sg_mapped);
1493 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1494 cmd->sg_mapped = 0;
1495}
1496
1497static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
1498 uint32_t req_cnt)
1499{
1500 struct qla_hw_data *ha = vha->hw;
1501 device_reg_t __iomem *reg = ha->iobase;
1502 uint32_t cnt;
1503
1504 if (vha->req->cnt < (req_cnt + 2)) {
1505 cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
1506
1507 ql_dbg(ql_dbg_tgt, vha, 0xe00a,
1508 "Request ring circled: cnt=%d, vha->->ring_index=%d, "
1509 "vha->req->cnt=%d, req_cnt=%d\n", cnt,
1510 vha->req->ring_index, vha->req->cnt, req_cnt);
1511 if (vha->req->ring_index < cnt)
1512 vha->req->cnt = cnt - vha->req->ring_index;
1513 else
1514 vha->req->cnt = vha->req->length -
1515 (vha->req->ring_index - cnt);
1516 }
1517
1518 if (unlikely(vha->req->cnt < (req_cnt + 2))) {
1519 ql_dbg(ql_dbg_tgt, vha, 0xe00b,
1520 "qla_target(%d): There is no room in the "
1521 "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
1522 "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
1523 vha->req->cnt, req_cnt);
1524 return -EAGAIN;
1525 }
1526 vha->req->cnt -= req_cnt;
1527
1528 return 0;
1529}
1530
1531/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
1533 */
1534static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
1535{
1536 /* Adjust ring index. */
1537 vha->req->ring_index++;
1538 if (vha->req->ring_index == vha->req->length) {
1539 vha->req->ring_index = 0;
1540 vha->req->ring_ptr = vha->req->ring;
1541 } else {
1542 vha->req->ring_ptr++;
1543 }
1544 return (cont_entry_t *)vha->req->ring_ptr;
1545}
1546
1547/* ha->hardware_lock supposed to be held on entry */
/* ha->hardware_lock supposed to be held on entry */
/*
 * Allocate the next free command handle, cycling through
 * 1..MAX_OUTSTANDING_COMMANDS while skipping the reserved
 * QLA_TGT_NULL_HANDLE / QLA_TGT_SKIP_HANDLE values and any slot still
 * occupied in ha->tgt.cmds[].  Returns QLA_TGT_NULL_HANDLE when the
 * search wraps all the way around without finding a free slot.
 */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > MAX_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			/* Came full circle: every slot is in use */
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}
1575
1576/* ha->hardware_lock supposed to be held on entry */
/*
 * Fill a CTIO7 request packet at the current ring slot for prm->cmd:
 * allocate a command handle, register the command in ha->tgt.cmds[],
 * and copy the initiator/exchange identity over from the original
 * ATIO.  Returns 0 on success or -EAGAIN when no handle is available.
 */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	/* s_id of the original ATIO arrives byte-reversed */
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}
1621
1622/*
1623 * ha->hardware_lock supposed to be held on entry. We have already made sure
1624 * that there is sufficient amount of request entries to not drop it.
1625 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		/*
		 * Pick the entry type and the matching dseg layout:
		 * A64 entries carry an extra high dword per address.
		 */
		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			/* Low address dword, optional high dword, length */
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
1688
1689/*
1690 * ha->hardware_lock supposed to be held on entry. We have already made sure
1691 * that there is sufficient amount of request entries to not drop it.
1692 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer: zero out the first descriptor */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		/* Low address dword, optional high dword, length */
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	/* Remaining segments go into continuation IOCBs */
	qlt_load_cont_data_segments(prm, vha);
}
1751
1752static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
1753{
1754 return cmd->bufflen > 0;
1755}
1756
1757/*
1758 * Called without ha->hardware_lock held
1759 */
/*
 * Prepare transmit parameters for qlt_xmit_response(): terminate
 * aborted commands early, DMA-map the S/G list when data is being sent,
 * fold residual under/overflow into the SCSI status, and compute the
 * total ring-entry count in *full_req_cnt (including an extra status
 * CTIO when sense/non-good status must follow a data-in transfer).
 * Called without ha->hardware_lock held.
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	/* Reflect residuals reported by the fabric layer in rq_result */
	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}
1851
1852static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
1853 struct qla_tgt_cmd *cmd, int sending_sense)
1854{
1855 if (ha->tgt.enable_class_2)
1856 return 0;
1857
1858 if (sending_sense)
1859 return cmd->conf_compl_supported;
1860 else
1861 return ha->tgt.enable_explicit_conf &&
1862 cmd->conf_compl_supported;
1863}
1864
1865#ifdef CONFIG_QLA_TGT_DEBUG_SRR
1866/*
1867 * Original taken from the XFS code
1868 */
/*
 * Lehmer / Park-Miller "minimal standard" PRNG: x = 16807 * x mod
 * (2^31 - 1), evaluated with Schrage's decomposition (127773, 2836) to
 * avoid overflow.  State is global, guarded by a local spinlock, and
 * lazily seeded from jiffies on first use.
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
1895
/*
 * Debug-only fault injection: randomly truncate the data buffer or cut
 * its head off an outgoing FCP READ response to exercise SRR (sequence
 * retransmission request) handling.  Commands that are not
 * DMA_FROM_DEVICE are left untouched.
 */
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	/* ~1% of multi-segment reads: truncate the S/G tail */
	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		/* Keep a random non-zero prefix of the S/G list */
		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	/* ~1% of reads with data: shift the transfer start by an offset */
	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
1949#else
/* SRR debug fault injection compiled out - no-op. */
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
1952#endif
1953
/*
 * Finalize the status portion of a CTIO7: set send-status and (when
 * warranted) explicit-confirm flags, residual and SCSI status, and copy
 * any valid sense data into the IOCB in big-endian 32-bit words,
 * switching the entry from status mode 0 to status mode 1.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	/* Sense payload is capped by the room available in the IOCB */
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		/* Sense present: switch to status mode 1 and attach it */
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		/* Copy sense as big-endian words; trailing <4 bytes dropped */
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
			    cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
#if 0
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			static int q;
			if (q < 10) {
				ql_dbg(ql_dbg_tgt, vha, 0xe04f,
				    "qla_target(%d): %d bytes of sense "
				    "lost", prm->tgt->ha->vp_idx,
				    prm->sense_buffer_len % 4);
				q++;
			}
		}
#endif
	} else {
		/* No sense: still status mode 1, with empty sense area */
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
2019
2020/*
2021 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
2022 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
2023 */
/*
 * Fabric-module entry point to transmit data and/or status for @cmd:
 * prepares transmit parameters, reserves ring space under
 * hardware_lock, builds the CTIO7 (plus an extra status CTIO when data
 * and sense/non-good status must both be sent), and fires the IOCBs.
 * Returns 0 on success or a negative errno from the preparation steps.
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		/* Aborted command was terminated (and possibly freed) */
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;


	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			/* Status rides along with the data CTIO */
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_tgt, vha, 0xe019,
			    "Building additional status packet\n");

			memcpy(ctio, pkt, sizeof(*ctio));
			ctio->entry_count = 1;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */

	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
	    pkt, scsi_status);

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
2137EXPORT_SYMBOL(qlt_xmit_response);
2138
/*
 * Fabric-module entry point to start the data-out (write) phase for
 * @cmd: maps the S/G list, reserves ring space, builds a CTIO7 with
 * CTIO7_FLAGS_DATA_OUT and fires it, leaving the command in
 * QLA_TGT_STATE_NEED_DATA.  Returns 0 on success or a negative errno.
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
	    (int)vha->vp_idx);

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);
	qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
2195EXPORT_SYMBOL(qlt_rdy_to_xfer);
2196
/* If hardware_lock held on entry, might drop it, then reacquire */
2198/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
/*
 * Queue a CTIO7 with CTIO7_FLAGS_TERMINATE for the exchange described
 * by @atio.  Returns 1 when @cmd is non-NULL and has progressed at
 * least to QLA_TGT_STATE_PROCESSED (the caller should then free it),
 * 0 otherwise, or -ENOMEM when no IOCB could be allocated.
 */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	/* Skip handle: no per-command completion lookup for this CTIO */
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	/* s_id of the original ATIO arrives byte-reversed */
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
2255
/*
 * Wrapper around __qlt_send_term_exchange() that acquires
 * hardware_lock unless the caller already holds it (@ha_locked), and
 * frees @cmd when the terminate was queued for an already-processed
 * command (rc == 1).
 */
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
	unsigned long flags;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
	if (rc == 1) {
		/* Give the hardware a moment before releasing the command */
		if (!ha_locked && !in_interrupt())
			msleep(250); /* just in case */

		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}
}
2280
2281void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2282{
2283 BUG_ON(cmd->sg_mapped);
2284
2285 if (unlikely(cmd->free_sg))
2286 kfree(cmd->sg);
2287 kmem_cache_free(qla_tgt_cmd_cachep, cmd);
2288}
2289EXPORT_SYMBOL(qlt_free_cmd);
2290
/* ha->hardware_lock supposed to be held on entry */
/*
 * Queue a CTIO that completed with SRR (Sequence Retransmission Request)
 * status.  Each CTIO SRR must pair up with an immediate-notify SRR that
 * carries the same srr_id; once both halves have arrived, tgt->srr_work
 * processes the pair in thread context.
 *
 * Returns 0 on success, -EINVAL if @ctio is NULL or no matching IMM SRR
 * exists, -ENOMEM if the tracking entry cannot be allocated.
 */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		/*
		 * If the IMM SRR counter has caught up with ours, the
		 * matching immediate notify should already be queued:
		 * look for it and kick the worker.
		 */
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				/* Counters matched but no IMM SRR: unwind */
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		/*
		 * Allocation failed: reject and drop any IMM SRR already
		 * queued for this id so it does not linger forever.
		 */
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}
2374
2375/*
2376 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2377 */
2378static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2379 struct qla_tgt_cmd *cmd, uint32_t status)
2380{
2381 int term = 0;
2382
2383 if (ctio != NULL) {
2384 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2385 term = !(c->flags &
2386 __constant_cpu_to_le16(OF_TERM_EXCH));
2387 } else
2388 term = 1;
2389
2390 if (term)
2391 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2392
2393 return term;
2394}
2395
2396/* ha->hardware_lock supposed to be held on entry */
2397static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2398 uint32_t handle)
2399{
2400 struct qla_hw_data *ha = vha->hw;
2401
2402 handle--;
2403 if (ha->tgt.cmds[handle] != NULL) {
2404 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2405 ha->tgt.cmds[handle] = NULL;
2406 return cmd;
2407 } else
2408 return NULL;
2409}
2410
/* ha->hardware_lock supposed to be held on entry */
/*
 * Translate a CTIO completion @handle into its qla_tgt_cmd.
 *
 * The handle may carry internal marker bits (completion/intermediate)
 * which are stripped first.  QLA_TGT_SKIP_HANDLE and NULL handles have
 * no associated command; out-of-range or stale handles are logged and
 * return NULL.
 */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
			    "SKIP_HANDLE CTIO\n");
			return NULL;
		}
		/* handle-1 is actually used */
		if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
2452
2453/*
2454 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2455 */
2456static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
2457 uint32_t status, void *ctio)
2458{
2459 struct qla_hw_data *ha = vha->hw;
2460 struct se_cmd *se_cmd;
2461 struct target_core_fabric_ops *tfo;
2462 struct qla_tgt_cmd *cmd;
2463
2464 ql_dbg(ql_dbg_tgt, vha, 0xe01e,
2465 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
2466 vha->vp_idx, ctio, status, handle);
2467
2468 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
2469 /* That could happen only in case of an error/reset/abort */
2470 if (status != CTIO_SUCCESS) {
2471 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
2472 "Intermediate CTIO received"
2473 " (status %x)\n", status);
2474 }
2475 return;
2476 }
2477
2478 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
2479 if (cmd == NULL)
2480 return;
2481
2482 se_cmd = &cmd->se_cmd;
2483 tfo = se_cmd->se_tfo;
2484
2485 if (cmd->sg_mapped)
2486 qlt_unmap_sg(vha, cmd);
2487
2488 if (unlikely(status != CTIO_SUCCESS)) {
2489 switch (status & 0xFFFF) {
2490 case CTIO_LIP_RESET:
2491 case CTIO_TARGET_RESET:
2492 case CTIO_ABORTED:
2493 case CTIO_TIMEOUT:
2494 case CTIO_INVALID_RX_ID:
2495 /* They are OK */
2496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
2497 "qla_target(%d): CTIO with "
2498 "status %#x received, state %x, se_cmd %p, "
2499 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
2500 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
2501 status, cmd->state, se_cmd);
2502 break;
2503
2504 case CTIO_PORT_LOGGED_OUT:
2505 case CTIO_PORT_UNAVAILABLE:
2506 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
2507 "qla_target(%d): CTIO with PORT LOGGED "
2508 "OUT (29) or PORT UNAVAILABLE (28) status %x "
2509 "received (state %x, se_cmd %p)\n", vha->vp_idx,
2510 status, cmd->state, se_cmd);
2511 break;
2512
2513 case CTIO_SRR_RECEIVED:
2514 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
2515 "qla_target(%d): CTIO with SRR_RECEIVED"
2516 " status %x received (state %x, se_cmd %p)\n",
2517 vha->vp_idx, status, cmd->state, se_cmd);
2518 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
2519 break;
2520 else
2521 return;
2522
2523 default:
2524 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
2525 "qla_target(%d): CTIO with error status "
2526 "0x%x received (state %x, se_cmd %p\n",
2527 vha->vp_idx, status, cmd->state, se_cmd);
2528 break;
2529 }
2530
2531 if (cmd->state != QLA_TGT_STATE_NEED_DATA)
2532 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
2533 return;
2534 }
2535
2536 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
2537 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
2538 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
2539 int rx_status = 0;
2540
2541 cmd->state = QLA_TGT_STATE_DATA_IN;
2542
2543 if (unlikely(status != CTIO_SUCCESS))
2544 rx_status = -EIO;
2545 else
2546 cmd->write_data_transferred = 1;
2547
2548 ql_dbg(ql_dbg_tgt, vha, 0xe020,
2549 "Data received, context %x, rx_status %d\n",
2550 0x0, rx_status);
2551
2552 ha->tgt.tgt_ops->handle_data(cmd);
2553 return;
2554 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
2555 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
2556 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
2557 } else {
2558 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
2559 "qla_target(%d): A command in state (%d) should "
2560 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
2561 }
2562
2563 if (unlikely(status != CTIO_SUCCESS)) {
2564 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
2565 dump_stack();
2566 }
2567
2568 ha->tgt.tgt_ops->free_cmd(cmd);
2569}
2570
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * ISR-side entry point for CTIO completions.  The irq_cmd_count
 * bracket lets the stop path know IRQ processing is in flight.
 *
 * NOTE(review): likely(tgt == NULL) treats "target mode disabled" as
 * the common case -- presumably because most qla2xxx users run
 * initiator-only; confirm this is intentional rather than an inverted
 * unlikely().
 */
void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;

	if (likely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe021,
		    "CTIO, but target mode not enabled"
		    " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
		return;
	}

	tgt->irq_cmd_count++;
	qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
	tgt->irq_cmd_count--;
}
2589
2590static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
2591 uint8_t task_codes)
2592{
2593 int fcp_task_attr;
2594
2595 switch (task_codes) {
2596 case ATIO_SIMPLE_QUEUE:
2597 fcp_task_attr = MSG_SIMPLE_TAG;
2598 break;
2599 case ATIO_HEAD_OF_QUEUE:
2600 fcp_task_attr = MSG_HEAD_TAG;
2601 break;
2602 case ATIO_ORDERED_QUEUE:
2603 fcp_task_attr = MSG_ORDERED_TAG;
2604 break;
2605 case ATIO_ACA_QUEUE:
2606 fcp_task_attr = MSG_ACA_TAG;
2607 break;
2608 case ATIO_UNTAGGED:
2609 fcp_task_attr = MSG_SIMPLE_TAG;
2610 break;
2611 default:
2612 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
2613 "qla_target: unknown task code %x, use ORDERED instead\n",
2614 task_codes);
2615 fcp_task_attr = MSG_ORDERED_TAG;
2616 break;
2617 }
2618
2619 return fcp_task_attr;
2620}
2621
2622static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
2623 uint8_t *);
2624/*
2625 * Process context for I/O path into tcm_qla2xxx code
2626 */
static void qlt_do_work(struct work_struct *work)
{
	/*
	 * Process-context half of new-command handling: find (or create)
	 * the initiator session, decode the FCP_CMND fields and hand the
	 * command to the fabric layer.  On any failure the exchange is
	 * terminated and the command freed.
	 */
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_sess *sess = NULL;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	if (tgt->tgt_stop)
		goto out_term;

	/* Session lookup must happen under hardware_lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (sess) {
		if (unlikely(sess->tearing_down)) {
			sess = NULL;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			goto out_term;
		} else {
			/*
			 * Do the extra kref_get() before dropping
			 * qla_hw_data->hardware_lock.
			 */
			kref_get(&sess->se_sess->sess_kref);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (unlikely(!sess)) {
		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
		    "qla_target(%d): Unable to find wwn login"
		    " (s_id %x:%x:%x), trying to create it manually\n",
		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

		if (atio->u.raw.entry_count > 1) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
			    "Dropping multy entry cmd %p\n", cmd);
			goto out_term;
		}

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has an extra creation ref. */
		mutex_unlock(&ha->tgt.tgt_mutex);

		if (!sess)
			goto out_term;
	}

	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	/* Decode CDB, tag and LUN from the 24xx ATIO */
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	/* rddata+wrdata set together means a bidirectional command */
	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	/* Data length follows the additional CDB bytes, big-endian */
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ql_dbg(ql_dbg_tgt, vha, 0xe022,
	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
	    cmd, cmd->unpacked_lun, cmd->tag);

	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}
2737
2738/* ha->hardware_lock supposed to be held on entry */
2739static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2740 struct atio_from_isp *atio)
2741{
2742 struct qla_hw_data *ha = vha->hw;
2743 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2744 struct qla_tgt_cmd *cmd;
2745
2746 if (unlikely(tgt->tgt_stop)) {
2747 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
2748 "New command while device %p is shutting down\n", tgt);
2749 return -EFAULT;
2750 }
2751
2752 cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
2753 if (!cmd) {
2754 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
2755 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
2756 return -ENOMEM;
2757 }
2758
2759 INIT_LIST_HEAD(&cmd->cmd_list);
2760
2761 memcpy(&cmd->atio, atio, sizeof(*atio));
2762 cmd->state = QLA_TGT_STATE_NEW;
2763 cmd->tgt = ha->tgt.qla_tgt;
2764 cmd->vha = vha;
2765
2766 INIT_WORK(&cmd->work, qlt_do_work);
2767 queue_work(qla_tgt_wq, &cmd->work);
2768 return 0;
2769
2770}
2771
/* ha->hardware_lock supposed to be held on entry */
/*
 * Allocate a management command, translate the qla task-management
 * function code @fn into a TMR_* value and pass it to the fabric
 * layer's handle_tmr() callback.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOSYS for an
 * unknown @fn, -EFAULT if handle_tmr() rejects the request.
 */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	/* Keep the original IOCB so a notify-ack can be sent later */
	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;
	/* The following functions have no TMR_* equivalent yet */
#if 0
	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;
#endif
	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
2878
2879/* ha->hardware_lock supposed to be held on entry */
2880static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
2881{
2882 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2883 struct qla_hw_data *ha = vha->hw;
2884 struct qla_tgt *tgt;
2885 struct qla_tgt_sess *sess;
2886 uint32_t lun, unpacked_lun;
2887 int lun_size, fn;
2888
2889 tgt = ha->tgt.qla_tgt;
2890
2891 lun = a->u.isp24.fcp_cmnd.lun;
2892 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
2893 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
2894 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2895 a->u.isp24.fcp_hdr.s_id);
2896 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2897
2898 if (!sess) {
2899 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
2900 "qla_target(%d): task mgmt fn 0x%x for "
2901 "non-existant session\n", vha->vp_idx, fn);
2902 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
2903 sizeof(struct atio_from_isp));
2904 }
2905
2906 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
2907}
2908
/* ha->hardware_lock supposed to be held on entry */
/*
 * Issue a TMR_ABORT_TASK for the command identified by the immediate
 * notify's sequence id, on an already-resolved session @sess.  The
 * original IOCB is preserved in the management command so the ack can
 * be sent at TM completion.
 */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	/* The task to abort is identified by its exchange sequence id */
	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
2947
2948/* ha->hardware_lock supposed to be held on entry */
2949static int qlt_abort_task(struct scsi_qla_host *vha,
2950 struct imm_ntfy_from_isp *iocb)
2951{
2952 struct qla_hw_data *ha = vha->hw;
2953 struct qla_tgt_sess *sess;
2954 int loop_id;
2955
2956 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
2957
2958 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
2959 if (sess == NULL) {
2960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
2961 "qla_target(%d): task abort for unexisting "
2962 "session\n", vha->vp_idx);
2963 return qlt_sched_sess_work(ha->tgt.qla_tgt,
2964 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
2965 }
2966
2967 return __qlt_abort_task(vha, iocb, sess);
2968}
2969
2970/*
2971 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2972 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	/*
	 * Handle an ELS frame delivered via immediate notify on 24xx
	 * hardware.  Login/logout ELS cause a session nexus loss; link
	 * re-discovery ELS (PDISC/ADISC) only flush a pending link
	 * re-init IOCB.  Returns non-zero when the caller should send a
	 * notify ack.
	 */
	struct qla_hw_data *ha = vha->hw;
	int res = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
	    "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
	    " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
	    iocb->u.isp24.status_subcode);

	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
	case ELS_FLOGI:
	case ELS_PRLI:
	case ELS_LOGO:
	case ELS_PRLO:
		/* (Re)login or logout invalidates the existing nexus */
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = ha->tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		res = 1; /* send notify ack */
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}
3016
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
	/*
	 * Rebuild cmd->sg so the scatterlist starts @offset bytes into
	 * the original data, for SRR retransmission from a relative
	 * offset.  Allocates a replacement SG table (marked via
	 * cmd->free_sg for later release in qlt_free_cmd()).
	 */
	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
	size_t first_offset = 0, rem_offset = offset, tmp = 0;
	int i, sg_srr_cnt, bufflen = 0;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
	    "cmd->sg_cnt: %u, direction: %d\n",
	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

	/*
	 * FIXME: Reject non zero SRR relative offset until we can test
	 * this code properly.
	 */
	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
	return -1;

	/*
	 * NOTE: everything below is currently unreachable because of the
	 * unconditional return above; it is kept for when the FIXME is
	 * resolved.
	 */
	if (!cmd->sg || !cmd->sg_cnt) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
		    "Missing cmd->sg or zero cmd->sg_cnt in"
		    " qla_tgt_set_data_offset\n");
		return -EINVAL;
	}
	/*
	 * Walk the current cmd->sg list until we locate the new sg_srr_start
	 */
	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
		    i, sg, sg_page(sg), sg->length, sg->offset);

		if ((sg->length + tmp) > offset) {
			first_offset = rem_offset;
			sg_srr_start = sg;
			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
			    "Found matching sg[%d], using %p as sg_srr_start, "
			    "and using first_offset: %zu\n", i, sg,
			    first_offset);
			break;
		}
		tmp += sg->length;
		rem_offset -= sg->length;
	}

	if (!sg_srr_start) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
		    "Unable to locate sg_srr_start for offset: %u\n", offset);
		return -EINVAL;
	}
	sg_srr_cnt = (cmd->sg_cnt - i);

	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
	if (!sg_srr) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
		    "Unable to allocate sgp\n");
		return -ENOMEM;
	}
	sg_init_table(sg_srr, sg_srr_cnt);
	sgp = &sg_srr[0];
	/*
	 * Walk the remaining list for sg_srr_start, mapping to the newly
	 * allocated sg_srr taking first_offset into account.
	 */
	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
		if (first_offset) {
			sg_set_page(sgp, sg_page(sg),
			    (sg->length - first_offset), first_offset);
			first_offset = 0;
		} else {
			sg_set_page(sgp, sg_page(sg), sg->length, 0);
		}
		bufflen += sgp->length;

		sgp = sg_next(sgp);
		if (!sgp)
			break;
	}

	cmd->sg = sg_srr;
	cmd->sg_cnt = sg_srr_cnt;
	cmd->bufflen = bufflen;
	cmd->offset += offset;
	cmd->free_sg = 1;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
	    cmd->sg_cnt);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
	    cmd->bufflen);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
	    cmd->offset);

	if (cmd->sg_cnt < 0)
		BUG();

	if (cmd->bufflen < 0)
		BUG();

	return 0;
}
3118
3119static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
3120 uint32_t srr_rel_offs, int *xmit_type)
3121{
3122 int res = 0, rel_offs;
3123
3124 rel_offs = srr_rel_offs - cmd->offset;
3125 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3126 srr_rel_offs, rel_offs);
3127
3128 *xmit_type = QLA_TGT_XMIT_ALL;
3129
3130 if (rel_offs < 0) {
3131 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
3132 "qla_target(%d): SRR rel_offs (%d) < 0",
3133 cmd->vha->vp_idx, rel_offs);
3134 res = -1;
3135 } else if (rel_offs == cmd->bufflen)
3136 *xmit_type = QLA_TGT_XMIT_STATUS;
3137 else if (rel_offs > 0)
3138 res = qlt_set_data_offset(cmd, rel_offs);
3139
3140 return res;
3141}
3142
/* No locks, thread context */
/*
 * Process a matched SRR pair (CTIO SRR + immediate-notify SRR) for
 * @sctio->cmd.  Depending on the SRR information unit, accept and
 * retransmit status and/or data, or reject the SRR and terminate the
 * exchange.  Takes ha->hardware_lock only around notify-ack/terminate
 * IOCB submission.
 */
static void qlt_handle_srr(struct scsi_qla_host *vha,
	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
	struct imm_ntfy_from_isp *ntfy =
	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_cmd *cmd = sctio->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	int xmit_type = 0, resp = 0;
	uint32_t offset;
	uint16_t srr_ui;

	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
	srr_ui = ntfy->u.isp24.srr_ui;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
	    cmd, srr_ui);

	switch (srr_ui) {
	case SRR_IU_STATUS:
		/* Initiator lost the status frame: ack and resend status */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_notify_ack(vha, ntfy,
		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		xmit_type = QLA_TGT_XMIT_STATUS;
		resp = 1;
		break;
	case SRR_IU_DATA_IN:
		/* Initiator lost read data: adjust SG offset and resend */
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
			    "Unable to process SRR_IU_DATA_IN due to"
			    " missing cmd->sg, state: %d\n", cmd->state);
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
			    "Rejecting SRR_IU_DATA_IN with non GOOD "
			    "scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			resp = 1;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
			    "qla_target(%d): SRR for in data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	case SRR_IU_DATA_OUT:
		/* Target lost write data: re-arm the data-out transfer */
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
			    "Unable to process SRR_IU_DATA_OUT due to"
			    " missing cmd->sg\n");
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
			    "Rejecting SRR_IU_DATA_OUT"
			    " with non GOOD scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			if (xmit_type & QLA_TGT_XMIT_DATA)
				qlt_rdy_to_xfer(cmd);
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
			    "qla_target(%d): SRR for out data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
		    "qla_target(%d): Unknown srr_ui value %x",
		    vha->vp_idx, srr_ui);
		goto out_reject;
	}

	/* Transmit response in case of status and data-in cases */
	if (resp)
		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);

	return;

out_reject:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
	/* A rejected data-out SRR falls back to normal data-in handling */
	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;
		dump_stack();
	} else
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
3265
static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
	struct qla_tgt_srr_imm *imm, int ha_locked)
{
	/*
	 * Send an SRR-reject notify ack for @imm and free it, acquiring
	 * ha->hardware_lock unless the caller holds it (@ha_locked).
	 */
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);

	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(imm);
}
3285
static void qlt_handle_srr_work(struct work_struct *work)
{
	/*
	 * Worker that pairs queued CTIO SRRs with their immediate-notify
	 * SRRs (matching srr_id) and processes each pair via
	 * qlt_handle_srr().  The list is re-scanned from the top after
	 * every processed pair because srr_lock is dropped meanwhile.
	 */
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_tgt_srr_ctio *sctio;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
	    tgt);

restart:
	spin_lock_irqsave(&tgt->srr_lock, flags);
	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
		struct qla_tgt_srr_imm *imm, *i, *ti;
		struct qla_tgt_cmd *cmd;
		struct se_cmd *se_cmd;

		imm = NULL;
		/* Collect the matching IMM SRR; reject any duplicates */
		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (i->srr_id == sctio->srr_id) {
				list_del(&i->srr_list_entry);
				if (imm) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
					    "qla_target(%d): There must be "
					    "only one IMM SRR per CTIO SRR "
					    "(IMM SRR %p, id %d, CTIO %p\n",
					    vha->vp_idx, i, i->srr_id, sctio);
					qlt_reject_free_srr_imm(tgt->vha, i, 0);
				} else
					imm = i;
			}
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
		    sctio->srr_id);

		if (imm == NULL) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
			    "Not found matching IMM for SRR CTIO (id %d)\n",
			    sctio->srr_id);
			continue;
		} else
			list_del(&sctio->srr_list_entry);

		spin_unlock_irqrestore(&tgt->srr_lock, flags);

		cmd = sctio->cmd;
		/*
		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
		 * logic..
		 */
		cmd->offset = 0;
		if (cmd->free_sg) {
			kfree(cmd->sg);
			cmd->sg = NULL;
			cmd->free_sg = 0;
		}
		se_cmd = &cmd->se_cmd;

		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
		    se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);

		qlt_handle_srr(vha, sctio, imm);

		kfree(imm);
		kfree(sctio);
		/* srr_lock was dropped: rescan both lists from the start */
		goto restart;
	}
	spin_unlock_irqrestore(&tgt->srr_lock, flags);
}
3364
/*
 * Queue the immediate-notify half of an SRR for deferred processing.
 *
 * Allocates a copy of @iocb (GFP_ATOMIC: hardware IRQ context), tags it
 * with the next imm_srr_id and appends it to tgt->srr_imm_list.  If the
 * matching CTIO SRR has already arrived (imm_srr_id == ctio_srr_id),
 * either schedules srr_work to pair them, or — when no CTIO with the
 * same id is actually queued — drops the entry and rejects the SRR.
 * On allocation failure, all CTIO SRRs with the current id are
 * terminated and the SRR is rejected via a notify-ack.
 *
 * ha->hardware_lock supposed to be held on entry (hence the plain
 * spin_lock on srr_lock: interrupts are already off).
 */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt_srr_imm *imm;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_srr_ctio *sctio;

	tgt->imm_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
	    vha->vp_idx);

	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
	if (imm != NULL) {
		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		imm->srr_id = tgt->imm_srr_id;
		list_add_tail(&imm->srr_list_entry,
		    &tgt->srr_imm_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			/* CTIO half should already be queued: look for it. */
			int found = 0;
			list_for_each_entry(sctio, &tgt->srr_ctio_list,
			    srr_list_entry) {
				if (sctio->srr_id == imm->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				/* Ids matched but no CTIO queued: state is
				 * inconsistent, reject this SRR. */
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR CTIO, deleting IMM "
				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
				    imm);
				list_del(&imm->srr_list_entry);

				kfree(imm);

				spin_unlock(&tgt->srr_lock);
				goto out_reject;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_ctio *ts;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
		    "qla_target(%d): Unable to allocate SRR IMM "
		    "entry, SRR request will be rejected\n", vha->vp_idx);

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		/* Terminate any CTIO SRRs of the same id: without the IMM
		 * half they can never be processed. */
		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
		    srr_list_entry) {
			if (sctio->srr_id == tgt->imm_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
				    "CTIO SRR %p deleted (id %d)\n",
				    sctio, sctio->srr_id);
				list_del(&sctio->srr_list_entry);
				qlt_send_term_exchange(vha, sctio->cmd,
				    &sctio->cmd->atio, 1);
				kfree(sctio);
			}
		}
		spin_unlock(&tgt->srr_lock);
		goto out_reject;
	}

	return;

out_reject:
	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}
3453
/*
 * Dispatch an IMMEDIATE NOTIFY IOCB from the firmware to the matching
 * handler (LIP reset, link reinit, logouts, task management, ELS, SRR,
 * etc.).  Unless a handler takes ownership of the acknowledgement
 * (clearing send_notify_ack), a NOTIFY ACK is sent back to the firmware
 * at the end.
 *
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	/* Default: ack here; handlers that send their own ack clear this. */
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = ha->tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		/* A previous REINIT still pending gets acked now and is
		 * replaced by this one. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		/* Log only; the default notify-ack still goes out. */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		/* SRR path acks (or rejects) asynchronously. */
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}
3591
3592/*
3593 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3594 * This function sends busy to ISP 2xxx or 24xx.
3595 */
3596static void qlt_send_busy(struct scsi_qla_host *vha,
3597 struct atio_from_isp *atio, uint16_t status)
3598{
3599 struct ctio7_to_24xx *ctio24;
3600 struct qla_hw_data *ha = vha->hw;
3601 request_t *pkt;
3602 struct qla_tgt_sess *sess = NULL;
3603
3604 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3605 atio->u.isp24.fcp_hdr.s_id);
3606 if (!sess) {
3607 qlt_send_term_exchange(vha, NULL, atio, 1);
3608 return;
3609 }
3610 /* Sending marker isn't necessary, since we called from ISR */
3611
3612 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3613 if (!pkt) {
3614 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
3615 "qla_target(%d): %s failed: unable to allocate "
3616 "request packet", vha->vp_idx, __func__);
3617 return;
3618 }
3619
3620 pkt->entry_count = 1;
3621 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3622
3623 ctio24 = (struct ctio7_to_24xx *)pkt;
3624 ctio24->entry_type = CTIO_TYPE7;
3625 ctio24->nport_handle = sess->loop_id;
3626 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
3627 ctio24->vp_index = vha->vp_idx;
3628 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3629 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3630 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3631 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3632 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
3633 __constant_cpu_to_le16(
3634 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
3635 CTIO7_FLAGS_DONT_RET_CTIO);
3636 /*
3637 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
3638 * if the explicit conformation is used.
3639 */
3640 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
3641 ctio24->u.status1.scsi_status = cpu_to_le16(status);
3642 ctio24->u.status1.residual = get_unaligned((uint32_t *)
3643 &atio->u.isp24.fcp_cmnd.add_cdb[
3644 atio->u.isp24.fcp_cmnd.add_cdb_len]);
3645 if (ctio24->u.status1.residual != 0)
3646 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3647
3648 qla2x00_start_iocbs(vha, vha->req);
3649}
3650
/*
 * Entry point for ATIO (incoming command / immediate notify) packets on
 * the 24xx ATIO queue.  Dispatches on entry_type: ATIO_TYPE7 becomes a
 * new command or task-management request; IMMED_NOTIFY_TYPE is routed
 * to qlt_handle_imm_notify().
 *
 * ha->hardware_lock supposed to be held on entry
 * called via callback from qla2xxx
 */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	/* Track in-flight IRQ-context work so stop can drain it. */
	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
		    atio->u.isp24.fcp_cmnd.rddata,
		    atio->u.isp24.fcp_cmnd.wrdata,
		    atio->u.isp24.fcp_cmnd.add_cdb_len,
		    be32_to_cpu(get_unaligned((uint32_t *)
			&atio->u.isp24.fcp_cmnd.add_cdb[
			atio->u.isp24.fcp_cmnd.add_cdb_len])),
		    atio->u.isp24.fcp_hdr.s_id[0],
		    atio->u.isp24.fcp_hdr.s_id[1],
		    atio->u.isp24.fcp_hdr.s_id[2]);

		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			/* No usable exchange: answer QUEUE FULL and bail. */
			ql_dbg(ql_dbg_tgt, vha, 0xe058,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}
		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
			rc = qlt_handle_cmd_for_atio(vha, atio);
		else
			rc = qlt_handle_task_mgmt(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
				/* Unknown initiator session. */
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
3753
/*
 * Entry point for target-mode packets arriving on the response queue:
 * CTIO completions, legacy 2xxx ATIOs, immediate notifies, NOTIFY ACK
 * and ABTS completions.  Dispatches on pkt->entry_type.
 *
 * ha->hardware_lock supposed to be held on entry
 * called via callback from qla2xxx
 */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	/* Track in-flight IRQ-context work so stop can drain it. */
	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
		    vha->vp_idx);
		/* Fold the IOCB-level entry_status into the upper 16 bits
		 * of the status passed to the completion handler. */
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		/* Legacy ISP2xxx: new command delivered via response queue. */
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		ql_dbg(ql_dbg_tgt, vha, 0xe031,
		    "ACCEPT_TGT_IO instance %d status %04x "
		    "lun %04x read/write %d data_length %04x "
		    "target_id %02x rx_id %04x\n ", vha->vp_idx,
		    le16_to_cpu(atio->u.isp2x.status),
		    le16_to_cpu(atio->u.isp2x.lun),
		    atio->u.isp2x.execution_codes,
		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
		    atio), atio->u.isp2x.rx_id);
		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe032,
		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
		    atio->u.isp2x.cdb[0], (unsigned long
		    int)sizeof(atio->u.isp2x.cdb));

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
				/* Unknown initiator session. */
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe033,
		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		/* Completion of a NOTIFY ACK we queued earlier. */
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		/* Completion of an ABTS response we queued earlier. */
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitely
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
3954
/*
 * Target-mode hook for firmware async events (mailbox interrupts):
 * logs/react to resets, loop state changes and port updates.  Called
 * from the core qla2xxx interrupt path.
 *
 * @code:    async event code (MBA_*)
 * @mailbox: snapshot of the mailbox registers for this event
 *
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int reason_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	/* Track in-flight IRQ-context work so stop can drain it. */
	tgt->irq_cmd_count++;

	/* NOTE(review): mailbox[] is normally already CPU byte order when it
	 * reaches here, so the le16_to_cpu() calls below look like no-ops on
	 * little-endian and suspicious on big-endian — confirm against the
	 * interrupt handler that fills this array. */
	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occured", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occured "
		    "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
		    le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
		/* Link is back: a held-off LINK REINIT can be acked now. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occured "
		    "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
		    le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occured: updating the ports database (m[1]=%x, m[2]=%x, "
		    "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
		    le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
		reason_code = le16_to_cpu(mailbox[2]);
		if (reason_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (reason_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occured: "
		    "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
		    le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
		break;
	}

	tgt->irq_cmd_count--;
}
4055
4056static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4057 uint16_t loop_id)
4058{
4059 fc_port_t *fcport;
4060 int rc;
4061
4062 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4063 if (!fcport) {
4064 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4065 "qla_target(%d): Allocation of tmp FC port failed",
4066 vha->vp_idx);
4067 return NULL;
4068 }
4069
4070 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
4071
4072 fcport->loop_id = loop_id;
4073
4074 rc = qla2x00_get_port_database(vha, fcport, 0);
4075 if (rc != QLA_SUCCESS) {
4076 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
4077 "qla_target(%d): Failed to retrieve fcport "
4078 "information -- get_port_database() returned %x "
4079 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
4080 kfree(fcport);
4081 return NULL;
4082 }
4083
4084 return fcport;
4085}
4086
4087/* Must be called under tgt_mutex */
4088static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
4089 uint8_t *s_id)
4090{
4091 struct qla_hw_data *ha = vha->hw;
4092 struct qla_tgt_sess *sess = NULL;
4093 fc_port_t *fcport = NULL;
4094 int rc, global_resets;
4095 uint16_t loop_id = 0;
4096
4097retry:
4098 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
4099
4100 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
4101 if (rc != 0) {
4102 if ((s_id[0] == 0xFF) &&
4103 (s_id[1] == 0xFC)) {
4104 /*
4105 * This is Domain Controller, so it should be
4106 * OK to drop SCSI commands from it.
4107 */
4108 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
4109 "Unable to find initiator with S_ID %x:%x:%x",
4110 s_id[0], s_id[1], s_id[2]);
4111 } else
4112 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
4113 "qla_target(%d): Unable to find "
4114 "initiator with S_ID %x:%x:%x",
4115 vha->vp_idx, s_id[0], s_id[1],
4116 s_id[2]);
4117 return NULL;
4118 }
4119
4120 fcport = qlt_get_port_database(vha, loop_id);
4121 if (!fcport)
4122 return NULL;
4123
4124 if (global_resets !=
4125 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
4126 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
4127 "qla_target(%d): global reset during session discovery "
4128 "(counter was %d, new %d), retrying", vha->vp_idx,
4129 global_resets,
4130 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
4131 goto retry;
4132 }
4133
4134 sess = qlt_create_sess(vha, fcport, true);
4135
4136 kfree(fcport);
4137 return sess;
4138}
4139
4140static void qlt_abort_work(struct qla_tgt *tgt,
4141 struct qla_tgt_sess_work_param *prm)
4142{
4143 struct scsi_qla_host *vha = tgt->vha;
4144 struct qla_hw_data *ha = vha->hw;
4145 struct qla_tgt_sess *sess = NULL;
4146 unsigned long flags;
4147 uint32_t be_s_id;
4148 uint8_t s_id[3];
4149 int rc;
4150
4151 spin_lock_irqsave(&ha->hardware_lock, flags);
4152
4153 if (tgt->tgt_stop)
4154 goto out_term;
4155
4156 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
4157 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
4158 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
4159
4160 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4161 (unsigned char *)&be_s_id);
4162 if (!sess) {
4163 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4164
4165 mutex_lock(&ha->tgt.tgt_mutex);
4166 sess = qlt_make_local_sess(vha, s_id);
4167 /* sess has got an extra creation ref */
4168 mutex_unlock(&ha->tgt.tgt_mutex);
4169
4170 spin_lock_irqsave(&ha->hardware_lock, flags);
4171 if (!sess)
4172 goto out_term;
4173 } else {
4174 kref_get(&sess->se_sess->sess_kref);
4175 }
4176
4177 if (tgt->tgt_stop)
4178 goto out_term;
4179
4180 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
4181 if (rc != 0)
4182 goto out_term;
4183 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4184
4185 ha->tgt.tgt_ops->put_sess(sess);
4186 return;
4187
4188out_term:
4189 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
4190 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4191 if (sess)
4192 ha->tgt.tgt_ops->put_sess(sess);
4193}
4194
/*
 * Process a deferred task-management request from the sess_work queue.
 *
 * Looks up (or creates, dropping the hardware lock for the blocking
 * discovery) the initiator session for the TM IOCB's S_ID, then issues
 * the task-management function.  On any failure the exchange is
 * terminated.
 *
 * Takes ha->hardware_lock; takes/holds a session reference which is
 * dropped before returning.
 */
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		/* Session discovery sleeps: do it outside the hw lock. */
		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}
4251
4252static void qlt_sess_work_fn(struct work_struct *work)
4253{
4254 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
4255 struct scsi_qla_host *vha = tgt->vha;
4256 unsigned long flags;
4257
4258 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
4259
4260 spin_lock_irqsave(&tgt->sess_work_lock, flags);
4261 while (!list_empty(&tgt->sess_works_list)) {
4262 struct qla_tgt_sess_work_param *prm = list_entry(
4263 tgt->sess_works_list.next, typeof(*prm),
4264 sess_works_list_entry);
4265
4266 /*
4267 * This work can be scheduled on several CPUs at time, so we
4268 * must delete the entry to eliminate double processing
4269 */
4270 list_del(&prm->sess_works_list_entry);
4271
4272 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4273
4274 switch (prm->type) {
4275 case QLA_TGT_SESS_WORK_ABORT:
4276 qlt_abort_work(tgt, prm);
4277 break;
4278 case QLA_TGT_SESS_WORK_TM:
4279 qlt_tmr_work(tgt, prm);
4280 break;
4281 default:
4282 BUG_ON(1);
4283 break;
4284 }
4285
4286 spin_lock_irqsave(&tgt->sess_work_lock, flags);
4287
4288 kfree(prm);
4289 }
4290 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4291}
4292
/*
 * Allocate and initialize the per-HBA struct qla_tgt and register it on
 * the global target list.  No-op when target mode is compiled out.
 *
 * Returns 0 on success (also when target mode is disabled) or -ENOMEM.
 *
 * Must be called under tgt_host_action_mutex
 */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p)", base_vha->host_no, ha);

	/* A target must be added at most once per HBA, and before any
	 * fabric module attaches its ops. */
	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	ha->tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}
4349
4350/* Must be called under tgt_host_action_mutex */
4351int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
4352{
4353 if (!ha->tgt.qla_tgt)
4354 return 0;
4355
4356 mutex_lock(&qla_tgt_mutex);
4357 list_del(&ha->tgt.qla_tgt->tgt_list_entry);
4358 mutex_unlock(&qla_tgt_mutex);
4359
4360 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
4361 vha->host_no, ha);
4362 qlt_release(ha->tgt.qla_tgt);
4363
4364 return 0;
4365}
4366
4367static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
4368 unsigned char *b)
4369{
4370 int i;
4371
4372 pr_debug("qla2xxx HW vha->node_name: ");
4373 for (i = 0; i < WWN_SIZE; i++)
4374 pr_debug("%02x ", vha->node_name[i]);
4375 pr_debug("\n");
4376 pr_debug("qla2xxx HW vha->port_name: ");
4377 for (i = 0; i < WWN_SIZE; i++)
4378 pr_debug("%02x ", vha->port_name[i]);
4379 pr_debug("\n");
4380
4381 pr_debug("qla2xxx passed configfs WWPN: ");
4382 put_unaligned_be64(wwpn, b);
4383 for (i = 0; i < WWN_SIZE; i++)
4384 pr_debug("%02x ", b[i]);
4385 pr_debug("\n");
4386}
4387
4388/**
4389 * qla_tgt_lport_register - register lport with external module
4390 *
4391 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
4392 * @wwpn: Passwd FC target WWPN
4393 * @callback: lport initialization callback for tcm_qla2xxx code
4394 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
4395 */
4396int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
4397 int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
4398{
4399 struct qla_tgt *tgt;
4400 struct scsi_qla_host *vha;
4401 struct qla_hw_data *ha;
4402 struct Scsi_Host *host;
4403 unsigned long flags;
4404 int rc;
4405 u8 b[WWN_SIZE];
4406
4407 mutex_lock(&qla_tgt_mutex);
4408 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
4409 vha = tgt->vha;
4410 ha = vha->hw;
4411
4412 host = vha->host;
4413 if (!host)
4414 continue;
4415
4416 if (ha->tgt.tgt_ops != NULL)
4417 continue;
4418
4419 if (!(host->hostt->supported_mode & MODE_TARGET))
4420 continue;
4421
4422 spin_lock_irqsave(&ha->hardware_lock, flags);
4423 if (host->active_mode & MODE_TARGET) {
4424 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
4425 host->host_no);
4426 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4427 continue;
4428 }
4429 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4430
4431 if (!scsi_host_get(host)) {
4432 ql_dbg(ql_dbg_tgt, vha, 0xe068,
4433 "Unable to scsi_host_get() for"
4434 " qla2xxx scsi_host\n");
4435 continue;
4436 }
4437 qlt_lport_dump(vha, wwpn, b);
4438
4439 if (memcmp(vha->port_name, b, WWN_SIZE)) {
4440 scsi_host_put(host);
4441 continue;
4442 }
4443 /*
4444 * Setup passed parameters ahead of invoking callback
4445 */
4446 ha->tgt.tgt_ops = qla_tgt_ops;
4447 ha->tgt.target_lport_ptr = target_lport_ptr;
4448 rc = (*callback)(vha);
4449 if (rc != 0) {
4450 ha->tgt.tgt_ops = NULL;
4451 ha->tgt.target_lport_ptr = NULL;
4452 }
4453 mutex_unlock(&qla_tgt_mutex);
4454 return rc;
4455 }
4456 mutex_unlock(&qla_tgt_mutex);
4457
4458 return -ENODEV;
4459}
4460EXPORT_SYMBOL(qlt_lport_register);
4461
4462/**
4463 * qla_tgt_lport_deregister - Degister lport
4464 *
4465 * @vha: Registered scsi_qla_host pointer
4466 */
4467void qlt_lport_deregister(struct scsi_qla_host *vha)
4468{
4469 struct qla_hw_data *ha = vha->hw;
4470 struct Scsi_Host *sh = vha->host;
4471 /*
4472 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
4473 */
4474 ha->tgt.target_lport_ptr = NULL;
4475 ha->tgt.tgt_ops = NULL;
4476 /*
4477 * Release the Scsi_Host reference for the underlying qla2xxx host
4478 */
4479 scsi_host_put(sh);
4480}
4481EXPORT_SYMBOL(qlt_lport_deregister);
4482
4483/* Must be called under HW lock */
4484void qlt_set_mode(struct scsi_qla_host *vha)
4485{
4486 struct qla_hw_data *ha = vha->hw;
4487
4488 switch (ql2x_ini_mode) {
4489 case QLA2XXX_INI_MODE_DISABLED:
4490 case QLA2XXX_INI_MODE_EXCLUSIVE:
4491 vha->host->active_mode = MODE_TARGET;
4492 break;
4493 case QLA2XXX_INI_MODE_ENABLED:
4494 vha->host->active_mode |= MODE_TARGET;
4495 break;
4496 default:
4497 break;
4498 }
4499
4500 if (ha->tgt.ini_mode_force_reverse)
4501 qla_reverse_ini_mode(vha);
4502}
4503
4504/* Must be called under HW lock */
4505void qlt_clear_mode(struct scsi_qla_host *vha)
4506{
4507 struct qla_hw_data *ha = vha->hw;
4508
4509 switch (ql2x_ini_mode) {
4510 case QLA2XXX_INI_MODE_DISABLED:
4511 vha->host->active_mode = MODE_UNKNOWN;
4512 break;
4513 case QLA2XXX_INI_MODE_EXCLUSIVE:
4514 vha->host->active_mode = MODE_INITIATOR;
4515 break;
4516 case QLA2XXX_INI_MODE_ENABLED:
4517 vha->host->active_mode &= ~MODE_TARGET;
4518 break;
4519 default:
4520 break;
4521 }
4522
4523 if (ha->tgt.ini_mode_force_reverse)
4524 qla_reverse_ini_mode(vha);
4525}
4526
4527/*
4528 * qla_tgt_enable_vha - NO LOCK HELD
4529 *
4530 * host_reset, bring up w/ Target Mode Enabled
4531 */
4532void
4533qlt_enable_vha(struct scsi_qla_host *vha)
4534{
4535 struct qla_hw_data *ha = vha->hw;
4536 struct qla_tgt *tgt = ha->tgt.qla_tgt;
4537 unsigned long flags;
4538
4539 if (!tgt) {
4540 ql_dbg(ql_dbg_tgt, vha, 0xe069,
4541 "Unable to locate qla_tgt pointer from"
4542 " struct qla_hw_data\n");
4543 dump_stack();
4544 return;
4545 }
4546
4547 spin_lock_irqsave(&ha->hardware_lock, flags);
4548 tgt->tgt_stopped = 0;
4549 qlt_set_mode(vha);
4550 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4551
4552 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4553 qla2xxx_wake_dpc(vha);
4554 qla2x00_wait_for_hba_online(vha);
4555}
4556EXPORT_SYMBOL(qlt_enable_vha);
4557
4558/*
4559 * qla_tgt_disable_vha - NO LOCK HELD
4560 *
4561 * Disable Target Mode and reset the adapter
4562 */
4563void
4564qlt_disable_vha(struct scsi_qla_host *vha)
4565{
4566 struct qla_hw_data *ha = vha->hw;
4567 struct qla_tgt *tgt = ha->tgt.qla_tgt;
4568 unsigned long flags;
4569
4570 if (!tgt) {
4571 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
4572 "Unable to locate qla_tgt pointer from"
4573 " struct qla_hw_data\n");
4574 dump_stack();
4575 return;
4576 }
4577
4578 spin_lock_irqsave(&ha->hardware_lock, flags);
4579 qlt_clear_mode(vha);
4580 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4581
4582 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4583 qla2xxx_wake_dpc(vha);
4584 qla2x00_wait_for_hba_online(vha);
4585}
4586
4587/*
4588 * Called from qla_init.c:qla24xx_vport_create() contex to setup
4589 * the target mode specific struct scsi_qla_host and struct qla_hw_data
4590 * members.
4591 */
4592void
4593qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
4594{
4595 if (!qla_tgt_mode_enabled(vha))
4596 return;
4597
4598 mutex_init(&ha->tgt.tgt_mutex);
4599 mutex_init(&ha->tgt.tgt_host_action_mutex);
4600
4601 qlt_clear_mode(vha);
4602
4603 /*
4604 * NOTE: Currently the value is kept the same for <24xx and
4605 * >=24xx ISPs. If it is necessary to change it,
4606 * the check should be added for specific ISPs,
4607 * assigning the value appropriately.
4608 */
4609 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
4610}
4611
4612void
4613qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
4614{
4615 /*
4616 * FC-4 Feature bit 0 indicates target functionality to the name server.
4617 */
4618 if (qla_tgt_mode_enabled(vha)) {
4619 if (qla_ini_mode_enabled(vha))
4620 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
4621 else
4622 ct_req->req.rff_id.fc4_feature = BIT_0;
4623 } else if (qla_ini_mode_enabled(vha)) {
4624 ct_req->req.rff_id.fc4_feature = BIT_1;
4625 }
4626}
4627
4628/*
4629 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
4630 * @ha: HA context
4631 *
4632 * Beginning of ATIO ring has initialization control block already built
4633 * by nvram config routine.
4634 *
4635 * Returns 0 on success.
4636 */
4637void
4638qlt_init_atio_q_entries(struct scsi_qla_host *vha)
4639{
4640 struct qla_hw_data *ha = vha->hw;
4641 uint16_t cnt;
4642 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
4643
4644 if (!qla_tgt_mode_enabled(vha))
4645 return;
4646
4647 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
4648 pkt->u.raw.signature = ATIO_PROCESSED;
4649 pkt++;
4650 }
4651
4652}
4653
4654/*
4655 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
4656 * @ha: SCSI driver HA context
4657 */
4658void
4659qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
4660{
4661 struct qla_hw_data *ha = vha->hw;
4662 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4663 struct atio_from_isp *pkt;
4664 int cnt, i;
4665
4666 if (!vha->flags.online)
4667 return;
4668
4669 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
4670 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
4671 cnt = pkt->u.raw.entry_count;
4672
4673 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
4674
4675 for (i = 0; i < cnt; i++) {
4676 ha->tgt.atio_ring_index++;
4677 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
4678 ha->tgt.atio_ring_index = 0;
4679 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4680 } else
4681 ha->tgt.atio_ring_ptr++;
4682
4683 pkt->u.raw.signature = ATIO_PROCESSED;
4684 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
4685 }
4686 wmb();
4687 }
4688
4689 /* Adjust ring index */
4690 WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
4691}
4692
/*
 * Zero the ATIO queue in/out registers at ring-configuration time.
 * The multiqueue (ha->mqenable) variant is stubbed out (#if 0) -- see
 * the FIXME below; only the single-queue isp24 path is live.
 */
void
qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
{
	struct qla_hw_data *ha = vha->hw;

/* FIXME: atio_q in/out for ha->mqenable=1..? */
	if (ha->mqenable) {
#if 0
		WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
		RD_REG_DWORD(&reg->isp25mq.atio_q_out);
#endif
	} else {
		/* Setup APTIO registers for target mode */
		WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
		/* NOTE(review): read-back presumably flushes the posted
		 * writes, matching qla2xxx register-access convention --
		 * confirm against the chip programming docs. */
		RD_REG_DWORD(&reg->isp24.atio_q_out);
	}
}
4712
4713void
4714qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
4715{
4716 struct qla_hw_data *ha = vha->hw;
4717
4718 if (qla_tgt_mode_enabled(vha)) {
4719 if (!ha->tgt.saved_set) {
4720 /* We save only once */
4721 ha->tgt.saved_exchange_count = nv->exchange_count;
4722 ha->tgt.saved_firmware_options_1 =
4723 nv->firmware_options_1;
4724 ha->tgt.saved_firmware_options_2 =
4725 nv->firmware_options_2;
4726 ha->tgt.saved_firmware_options_3 =
4727 nv->firmware_options_3;
4728 ha->tgt.saved_set = 1;
4729 }
4730
4731 nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
4732
4733 /* Enable target mode */
4734 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
4735
4736 /* Disable ini mode, if requested */
4737 if (!qla_ini_mode_enabled(vha))
4738 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
4739
4740 /* Disable Full Login after LIP */
4741 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
4742 /* Enable initial LIP */
4743 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
4744 /* Enable FC tapes support */
4745 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4746 /* Disable Full Login after LIP */
4747 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
4748 /* Enable target PRLI control */
4749 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
4750 } else {
4751 if (ha->tgt.saved_set) {
4752 nv->exchange_count = ha->tgt.saved_exchange_count;
4753 nv->firmware_options_1 =
4754 ha->tgt.saved_firmware_options_1;
4755 nv->firmware_options_2 =
4756 ha->tgt.saved_firmware_options_2;
4757 nv->firmware_options_3 =
4758 ha->tgt.saved_firmware_options_3;
4759 }
4760 return;
4761 }
4762
4763 /* out-of-order frames reassembly */
4764 nv->firmware_options_3 |= BIT_6|BIT_9;
4765
4766 if (ha->tgt.enable_class_2) {
4767 if (vha->flags.init_done)
4768 fc_host_supported_classes(vha->host) =
4769 FC_COS_CLASS2 | FC_COS_CLASS3;
4770
4771 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
4772 } else {
4773 if (vha->flags.init_done)
4774 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
4775
4776 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
4777 }
4778}
4779
4780void
4781qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
4782 struct init_cb_24xx *icb)
4783{
4784 struct qla_hw_data *ha = vha->hw;
4785
4786 if (ha->tgt.node_name_set) {
4787 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
4788 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
4789 }
4790}
4791
4792int
4793qlt_24xx_process_response_error(struct scsi_qla_host *vha,
4794 struct sts_entry_24xx *pkt)
4795{
4796 switch (pkt->entry_type) {
4797 case ABTS_RECV_24XX:
4798 case ABTS_RESP_24XX:
4799 case CTIO_TYPE7:
4800 case NOTIFY_ACK_TYPE:
4801 return 1;
4802 default:
4803 return 0;
4804 }
4805}
4806
4807void
4808qlt_modify_vp_config(struct scsi_qla_host *vha,
4809 struct vp_config_entry_24xx *vpmod)
4810{
4811 if (qla_tgt_mode_enabled(vha))
4812 vpmod->options_idx1 &= ~BIT_5;
4813 /* Disable ini mode, if requested */
4814 if (!qla_ini_mode_enabled(vha))
4815 vpmod->options_idx1 &= ~BIT_4;
4816}
4817
4818void
4819qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
4820{
4821 if (!QLA_TGT_MODE_ENABLED())
4822 return;
4823
4824 mutex_init(&ha->tgt.tgt_mutex);
4825 mutex_init(&ha->tgt.tgt_host_action_mutex);
4826 qlt_clear_mode(base_vha);
4827}
4828
4829int
4830qlt_mem_alloc(struct qla_hw_data *ha)
4831{
4832 if (!QLA_TGT_MODE_ENABLED())
4833 return 0;
4834
4835 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
4836 MAX_MULTI_ID_FABRIC, GFP_KERNEL);
4837 if (!ha->tgt.tgt_vp_map)
4838 return -ENOMEM;
4839
4840 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
4841 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
4842 &ha->tgt.atio_dma, GFP_KERNEL);
4843 if (!ha->tgt.atio_ring) {
4844 kfree(ha->tgt.tgt_vp_map);
4845 return -ENOMEM;
4846 }
4847 return 0;
4848}
4849
4850void
4851qlt_mem_free(struct qla_hw_data *ha)
4852{
4853 if (!QLA_TGT_MODE_ENABLED())
4854 return;
4855
4856 if (ha->tgt.atio_ring) {
4857 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
4858 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
4859 ha->tgt.atio_dma);
4860 }
4861 kfree(ha->tgt.tgt_vp_map);
4862}
4863
4864/* vport_slock to be held by the caller */
4865void
4866qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
4867{
4868 if (!QLA_TGT_MODE_ENABLED())
4869 return;
4870
4871 switch (cmd) {
4872 case SET_VP_IDX:
4873 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
4874 break;
4875 case SET_AL_PA:
4876 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
4877 break;
4878 case RESET_VP_IDX:
4879 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
4880 break;
4881 case RESET_AL_PA:
4882 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
4883 break;
4884 }
4885}
4886
4887static int __init qlt_parse_ini_mode(void)
4888{
4889 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
4890 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
4891 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
4892 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
4893 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
4894 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
4895 else
4896 return false;
4897
4898 return true;
4899}
4900
/*
 * Module-init hook for the target core: parse the qlini_mode parameter
 * and, when target mode is compiled in, create the command caches, the
 * management-command mempool and the target workqueue.
 *
 * Returns 1 when initiator mode is being disabled, 0 on plain success,
 * negative errno on failure (all partially-created resources are
 * unwound via the goto chain below, in reverse order of creation).
 */
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	/* Nothing else to set up when target mode is compiled out */
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	/* mempool_create(25, ...) keeps 25 mgmt commands in reserve so
	 * allocation keeps working under memory pressure */
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

	/* Error unwind: destroy in reverse order of creation */
out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}
4962
/*
 * Module-exit hook: tear down everything qlt_init() created, in reverse
 * order of creation (workqueue first so no work can touch the pools or
 * caches while they are being destroyed).
 */
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 000000000000..9f9ef1644fd9
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1004 @@
1/*
2 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
3 * Copyright (C) 2004 - 2005 Leonid Stoljar
4 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
5 * Copyright (C) 2007 - 2010 ID7 Ltd.
6 *
7 * Forward port and refactoring to modern qla2xxx and target/configfs
8 *
9 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * Additional file for the target driver support.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 2
16 * of the License, or (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23/*
24 * This is the global def file that is useful for including from the
25 * target portion.
26 */
27
28#ifndef __QLA_TARGET_H
29#define __QLA_TARGET_H
30
31#include "qla_def.h"
32
/*
 * Must be changed on any change in any initiator visible interfaces or
 * data in the target add-on
 */
#define QLA2XXX_TARGET_MAGIC	269

/*
 * Must be changed on any change in any target visible interfaces or
 * data in the initiator
 */
#define QLA2XXX_INITIATOR_MAGIC   57222

/* Accepted values of the qlini_mode module parameter (see
 * qlt_parse_ini_mode() in qla_target.c) and their numeric codes. */
#define QLA2XXX_INI_MODE_STR_EXCLUSIVE	"exclusive"
#define QLA2XXX_INI_MODE_STR_DISABLED	"disabled"
#define QLA2XXX_INI_MODE_STR_ENABLED	"enabled"

#define QLA2XXX_INI_MODE_EXCLUSIVE	0
#define QLA2XXX_INI_MODE_DISABLED	1
#define QLA2XXX_INI_MODE_ENABLED	2

#define QLA2XXX_COMMAND_COUNT_INIT	250
#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250

/*
 * Used to mark which completion handles (for RIO Status's) are for CTIO's
 * vs. regular (non-target) info. This is checked for in
 * qla2x00_process_response_queue() to see if a handle coming back in a
 * multi-complete should come to the tgt driver or be handled there by qla2xxx
 */
#define CTIO_COMPLETION_HANDLE_MARK	BIT_29
#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
#endif
#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)

/* Used to mark CTIO as intermediate */
#define CTIO_INTERMEDIATE_HANDLE_MARK	BIT_30
#ifndef OF_SS_MODE_0
/*
 * ISP target entries - Flags bit definitions.
 */
#define OF_SS_MODE_0        0
#define OF_SS_MODE_1        1
#define OF_SS_MODE_2        2
#define OF_SS_MODE_3        3

#define OF_EXPL_CONF        BIT_5       /* Explicit Confirmation Requested */
#define OF_DATA_IN          BIT_6       /* Data in to initiator */
                                        /*  (data from target to initiator) */
#define OF_DATA_OUT         BIT_7       /* Data out from initiator */
                                        /*  (data from initiator to target) */
#define OF_NO_DATA          (BIT_7 | BIT_6)
#define OF_INC_RC           BIT_8       /* Increment command resource count */
#define OF_FAST_POST        BIT_9       /* Enable mailbox fast posting. */
#define OF_CONF_REQ         BIT_13      /* Confirmation Requested */
#define OF_TERM_EXCH        BIT_14      /* Terminate exchange */
#define OF_SSTS             BIT_15      /* Send SCSI status */
#endif

/* Data-segment capacity of 2xxx-series command/continuation IOCBs */
#ifndef QLA_TGT_DATASEGS_PER_CMD32
#define QLA_TGT_DATASEGS_PER_CMD32	3
#define QLA_TGT_DATASEGS_PER_CONT32	7
#define QLA_TGT_MAX_SG32(ql) \
	(((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
		QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)

#define QLA_TGT_DATASEGS_PER_CMD64	2
#define QLA_TGT_DATASEGS_PER_CONT64	5
#define QLA_TGT_MAX_SG64(ql) \
	(((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
		QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
#endif

#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
#define QLA_TGT_DATASEGS_PER_CMD_24XX	1
#define QLA_TGT_DATASEGS_PER_CONT_24XX	5
/* 24xx scatter/gather size is additionally capped at 1270 segments */
#define QLA_TGT_MAX_SG_24XX(ql) \
	(min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
		QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
#endif
/*
 * NOTE(review): this second #endif appears to close the __QLA_TARGET_H
 * include guard opened near the top of the file, leaving the rest of the
 * header outside the guard -- verify preprocessor balance against the
 * complete file.
 */
#endif

/* Extract the 16-bit target id from a 2xxx-series IOCB */
#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha))			\
			 ? le16_to_cpu((iocb)->u.isp2x.target.extended)	\
			 : (uint16_t)(iocb)->u.isp2x.target.id.standard)
119
#ifndef IMMED_NOTIFY_TYPE
#define IMMED_NOTIFY_TYPE 0x0D		/* Immediate notify entry. */
/*
 * ISP queue - immediate notify entry structure definition.
 * This is sent by the ISP to the Target driver.
 * This IOCB would have report of events sent by the
 * initiator, that needs to be handled by the target
 * driver immediately.
 */
struct imm_ntfy_from_isp {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	union {
		/* Layout for 2xxx-series ISPs */
		struct {
			uint32_t sys_define_2; /* System defined. */
			target_id_t target;
			uint16_t lun;
			uint8_t  target_id;
			uint8_t  reserved_1;
			uint16_t status_modifier;
			uint16_t status;
			uint16_t task_flags;
			uint16_t seq_id;
			uint16_t srr_rx_id;
			uint32_t srr_rel_offs;
			uint16_t srr_ui;
/* SRR (sequence retransmission request) information-unit codes */
#define SRR_IU_DATA_IN	0x1
#define SRR_IU_DATA_OUT	0x5
#define SRR_IU_STATUS	0x7
			uint16_t srr_ox_id;
			uint8_t  reserved_2[28];
		} isp2x;
		/* Layout for 24xx-and-later ISPs */
		struct {
			uint32_t reserved;
			uint16_t nport_handle;
			uint16_t reserved_2;
			uint16_t flags;
#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO	BIT_1
#define NOTIFY24XX_FLAGS_PUREX_IOCB	BIT_0
			uint16_t srr_rx_id;
			uint16_t status;
			uint8_t  status_subcode;
			uint8_t  reserved_3;
			uint32_t exchange_address;
			uint32_t srr_rel_offs;
			uint16_t srr_ui;
			uint16_t srr_ox_id;
			uint8_t  reserved_4[19];
			uint8_t  vp_index;
			uint32_t reserved_5;
			uint8_t  port_id[3];
			uint8_t  reserved_6;
		} isp24;
	} u;
	uint16_t reserved_7;
	uint16_t ox_id;
} __packed;
#endif
180
#ifndef NOTIFY_ACK_TYPE
#define NOTIFY_ACK_TYPE 0x0E		/* Notify acknowledge entry. */
/*
 * ISP queue - notify acknowledge entry structure definition.
 * This is sent to the ISP from the target driver.
 */
struct nack_to_isp {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	union {
		/* Layout for 2xxx-series ISPs */
		struct {
			uint32_t sys_define_2; /* System defined. */
			target_id_t target;
			uint8_t  target_id;
			uint8_t  reserved_1;
			uint16_t flags;
			uint16_t resp_code;
			uint16_t status;
			uint16_t task_flags;
			uint16_t seq_id;
			uint16_t srr_rx_id;
			uint32_t srr_rel_offs;
			uint16_t srr_ui;
			uint16_t srr_flags;
			uint16_t srr_reject_code;
			uint8_t  srr_reject_vendor_uniq;
			uint8_t  srr_reject_code_expl;
			uint8_t  reserved_2[24];
		} isp2x;
		/* Layout for 24xx-and-later ISPs */
		struct {
			uint32_t handle;
			uint16_t nport_handle;
			uint16_t reserved_1;
			uint16_t flags;
			uint16_t srr_rx_id;
			uint16_t status;
			uint8_t  status_subcode;
			uint8_t  reserved_3;
			uint32_t exchange_address;
			uint32_t srr_rel_offs;
			uint16_t srr_ui;
			uint16_t srr_flags;
			uint8_t  reserved_4[19];
			uint8_t  vp_index;
			uint8_t  srr_reject_vendor_uniq;
			uint8_t  srr_reject_code_expl;
			uint8_t  srr_reject_code;
			uint8_t  reserved_5[5];
		} isp24;
	} u;
	uint8_t  reserved[2];
	uint16_t ox_id;
} __packed;
/* srr_flags values */
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT	0
#define NOTIFY_ACK_SRR_FLAGS_REJECT	1

#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM	0x9

/* srr_reject_code_expl values */
#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL		0
#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA	0x2a

#define NOTIFY_ACK_SUCCESS	0x01
#endif
246
#ifndef ACCEPT_TGT_IO_TYPE
#define ACCEPT_TGT_IO_TYPE 0x16	/* Accept target I/O entry. */
#endif

#ifndef CONTINUE_TGT_IO_TYPE
#define CONTINUE_TGT_IO_TYPE 0x17
/*
 * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
 * This structure is sent to the ISP 2xxx from target driver.
 */
struct ctio_to_2xxx {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	uint32_t handle;		/* System defined handle */
	target_id_t target;
	uint16_t rx_id;
	uint16_t flags;
	uint16_t status;
	uint16_t timeout;		/* 0 = 30 seconds, 0xFFFF = disable */
	uint16_t dseg_count;		/* Data segment count. */
	uint32_t relative_offset;
	uint32_t residual;
	uint16_t reserved_1[3];
	uint16_t scsi_status;
	uint32_t transfer_length;
	uint32_t dseg_0_address;	/* Data segment 0 address. */
	uint32_t dseg_0_length;		/* Data segment 0 length. */
	uint32_t dseg_1_address;	/* Data segment 1 address. */
	uint32_t dseg_1_length;		/* Data segment 1 length. */
	uint32_t dseg_2_address;	/* Data segment 2 address. */
	uint32_t dseg_2_length;		/* Data segment 2 length. */
} __packed;
/* ATIO entry status codes */
#define ATIO_PATH_INVALID	0x07
#define ATIO_CANT_PROV_CAP	0x16
#define ATIO_CDB_VALID		0x3D

#define ATIO_EXEC_READ		BIT_1
#define ATIO_EXEC_WRITE		BIT_0
#endif

#ifndef CTIO_A64_TYPE
#define CTIO_A64_TYPE 0x1F
/* CTIO completion status codes */
#define CTIO_SUCCESS			0x01
#define CTIO_ABORTED			0x02
#define CTIO_INVALID_RX_ID		0x08
#define CTIO_TIMEOUT			0x0B
#define CTIO_LIP_RESET			0x0E
#define CTIO_TARGET_RESET		0x17
#define CTIO_PORT_UNAVAILABLE		0x28
#define CTIO_PORT_LOGGED_OUT		0x29
#define CTIO_PORT_CONF_CHANGED		0x2A
#define CTIO_SRR_RECEIVED		0x45
#endif
302
#ifndef CTIO_RET_TYPE
#define CTIO_RET_TYPE	0x17		/* CTIO return entry */
#define ATIO_TYPE7 0x06			/* Accept target I/O entry for 24xx */

/* FCP frame header as carried inside the type-7 ATIO */
struct fcp_hdr {
	uint8_t  r_ctl;
	uint8_t  d_id[3];
	uint8_t  cs_ctl;
	uint8_t  s_id[3];
	uint8_t  type;
	uint8_t  f_ctl[3];
	uint8_t  seq_id;
	uint8_t  df_ctl;
	uint16_t seq_cnt;
	uint16_t ox_id;
	uint16_t rx_id;
	uint32_t parameter;
} __packed;

/* Same header with byte positions swapped within each word -- presumably
 * the little-endian form presented by the ISP in ABTS IOCBs; note
 * d_id/r_ctl and s_id/cs_ctl are reversed relative to struct fcp_hdr. */
struct fcp_hdr_le {
	uint8_t  d_id[3];
	uint8_t  r_ctl;
	uint8_t  s_id[3];
	uint8_t  cs_ctl;
	uint8_t  f_ctl[3];
	uint8_t  type;
	uint16_t seq_cnt;
	uint8_t  df_ctl;
	uint8_t  seq_id;
	uint16_t rx_id;
	uint16_t ox_id;
	uint32_t parameter;
} __packed;

/* FC frame-control (F_CTL) bits */
#define F_CTL_EXCH_CONTEXT_RESP	BIT_23
#define F_CTL_SEQ_CONTEXT_RESIP	BIT_22
#define F_CTL_LAST_SEQ		BIT_20
#define F_CTL_END_SEQ		BIT_19
#define F_CTL_SEQ_INITIATIVE	BIT_16

/* FC routing-control (R_CTL) values for basic link services */
#define R_CTL_BASIC_LINK_SERV	0x80
#define R_CTL_B_ACC		0x4
#define R_CTL_B_RJT		0x5

/* FCP_CMND payload embedded in a type-7 ATIO */
struct atio7_fcp_cmnd {
	uint64_t lun;
	uint8_t  cmnd_ref;
	uint8_t  task_attr:3;
	uint8_t  reserved:5;
	uint8_t  task_mgmt_flags;
#define FCP_CMND_TASK_MGMT_CLEAR_ACA		6
#define FCP_CMND_TASK_MGMT_TARGET_RESET		5
#define FCP_CMND_TASK_MGMT_LU_RESET		4
#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET	2
#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET	1
	uint8_t  wrdata:1;
	uint8_t  rddata:1;
	uint8_t  add_cdb_len:6;
	uint8_t  cdb[16];
	/*
	 * add_cdb is optional and can absent from struct atio7_fcp_cmnd. Size 4
	 * only to make sizeof(struct atio7_fcp_cmnd) be as expected by
	 * BUILD_BUG_ON in qlt_init().
	 */
	uint8_t  add_cdb[4];
	/* uint32_t data_length; */
} __packed;
370
/*
 * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
 * This is sent from the ISP to the target driver.
 */
struct atio_from_isp {
	union {
		/* Layout for 2xxx-series ISPs */
		struct {
			uint16_t entry_hdr;
			uint8_t  sys_define;	/* System defined. */
			uint8_t  entry_status;	/* Entry Status. */
			uint32_t sys_define_2;	/* System defined. */
			target_id_t target;
			uint16_t rx_id;
			uint16_t flags;
			uint16_t status;
			uint8_t  command_ref;
			uint8_t  task_codes;
			uint8_t  task_flags;
			uint8_t  execution_codes;
			uint8_t  cdb[MAX_CMDSZ];
			uint32_t data_length;
			uint16_t lun;
			uint8_t  initiator_port_name[WWN_SIZE]; /* on qla23xx */
			uint16_t reserved_32[6];
			uint16_t ox_id;
		} isp2x;
		/* Type-7 layout for 24xx-and-later ISPs */
		struct {
			uint16_t entry_hdr;
			uint8_t  fcp_cmnd_len_low;
			uint8_t  fcp_cmnd_len_high:4;
			uint8_t  attr:4;
			uint32_t exchange_addr;
#define ATIO_EXCHANGE_ADDRESS_UNKNOWN	0xFFFFFFFF
			struct fcp_hdr fcp_hdr;
			struct atio7_fcp_cmnd fcp_cmnd;
		} isp24;
		/* Raw view used for ring bookkeeping (see
		 * qlt_init_atio_q_entries()/qlt_24xx_process_atio_queue()) */
		struct {
			uint8_t  entry_type;	/* Entry type. */
			uint8_t  entry_count;	/* Entry count. */
			uint8_t  data[58];
			uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD	/* Signature */
		} raw;
	} u;
} __packed;

#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
418
/*
 * ISP queue - Continue Target I/O (ATIO) type 7 entry (for 24xx) structure.
 * This structure is sent to the ISP 24xx from the target driver.
 */

struct ctio7_to_24xx {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	uint32_t handle;		/* System defined handle */
	uint16_t nport_handle;
#define CTIO7_NHANDLE_UNRECOGNIZED	0xFFFF
	uint16_t timeout;
	uint16_t dseg_count;		/* Data segment count. */
	uint8_t  vp_index;
	uint8_t  add_flags;
	uint8_t  initiator_id[3];
	uint8_t  reserved;
	uint32_t exchange_addr;
	union {
		/* Status-mode-0 payload: data transfer plus one segment */
		struct {
			uint16_t reserved1;
			uint16_t flags;
			uint32_t residual;
			uint16_t ox_id;
			uint16_t scsi_status;
			uint32_t relative_offset;
			uint32_t reserved2;
			uint32_t transfer_length;
			uint32_t reserved3;
			/* Data segment 0 address. */
			uint32_t dseg_0_address[2];
			/* Data segment 0 length. */
			uint32_t dseg_0_length;
		} status0;
		/* Status-mode-1 payload: sense/response data */
		struct {
			uint16_t sense_length;
			uint16_t flags;
			uint32_t residual;
			uint16_t ox_id;
			uint16_t scsi_status;
			uint16_t response_len;
			uint16_t reserved;
			uint8_t  sense_data[24];
		} status1;
	} u;
} __packed;
467
/*
 * ISP queue - CTIO type 7 from ISP 24xx to target driver
 * returned entry structure.
 */
struct ctio7_from_24xx {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	uint32_t handle;		/* System defined handle */
	uint16_t status;
	uint16_t timeout;
	uint16_t dseg_count;		/* Data segment count. */
	uint8_t  vp_index;
	uint8_t  reserved1[5];
	uint32_t exchange_address;
	uint16_t reserved2;
	uint16_t flags;
	uint32_t residual;
	uint16_t ox_id;
	uint16_t reserved3;
	uint32_t relative_offset;
	uint8_t  reserved4[24];
} __packed;

/* CTIO7 flags values */
#define CTIO7_FLAGS_SEND_STATUS		BIT_15
#define CTIO7_FLAGS_TERMINATE		BIT_14
#define CTIO7_FLAGS_CONFORM_REQ		BIT_13
#define CTIO7_FLAGS_DONT_RET_CTIO	BIT_8
#define CTIO7_FLAGS_STATUS_MODE_0	0
#define CTIO7_FLAGS_STATUS_MODE_1	BIT_6
#define CTIO7_FLAGS_EXPLICIT_CONFORM	BIT_5
#define CTIO7_FLAGS_CONFIRM_SATISF	BIT_4
#define CTIO7_FLAGS_DSD_PTR		BIT_2
#define CTIO7_FLAGS_DATA_IN		BIT_1
#define CTIO7_FLAGS_DATA_OUT		BIT_0

/* FC ELS command codes */
#define ELS_PLOGI			0x3
#define ELS_FLOGI			0x4
#define ELS_LOGO			0x5
#define ELS_PRLI			0x20
#define ELS_PRLO			0x21
#define ELS_TPRLO			0x24
#define ELS_PDISC			0x50
#define ELS_ADISC			0x52
514
515/*
516 * ISP queue - ABTS received/response entries structure definition for 24xx.
517 */
518#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
519#define ABTS_RESP_24XX 0x55 /* ABTS responce (for 24xx) */
520
/*
 * ISP queue - ABTS received IOCB entry structure definition for 24xx.
 * The ABTS BLS received from the wire is sent to the
 * target driver by the ISP 24xx.
 * The IOCB is placed on the response queue.
 *
 * Hardware-defined layout; __packed, do not reorder fields.
 */
struct abts_recv_from_24xx {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	uint8_t	 reserved_1[6];
	uint16_t nport_handle;		/* handle of the initiator N_Port */
	uint8_t	 reserved_2[2];
	uint8_t	 vp_index;		/* virtual port index */
	uint8_t	 reserved_3:4;
	uint8_t	 sof_type:4;		/* start-of-frame type nibble */
	uint32_t exchange_address;
	struct fcp_hdr_le fcp_hdr_le;	/* little-endian FC frame header */
	uint8_t	 reserved_4[16];
	uint32_t exchange_addr_to_abort; /* exchange the ABTS targets */
} __packed;
543
544#define ABTS_PARAM_ABORT_SEQ BIT_0
545
/* BA_ACC (Basic Accept) payload, little-endian wire format. */
struct ba_acc_le {
	uint16_t reserved;
	uint8_t	 seq_id_last;
	uint8_t	 seq_id_valid;		/* SEQ_ID_VALID or SEQ_ID_INVALID */
#define SEQ_ID_VALID	0x80
#define SEQ_ID_INVALID	0x00
	uint16_t rx_id;
	uint16_t ox_id;
	uint16_t high_seq_cnt;
	uint16_t low_seq_cnt;
} __packed;

/* BA_RJT (Basic Reject) payload, little-endian wire format. */
struct ba_rjt_le {
	uint8_t	 vendor_uniq;
	uint8_t	 reason_expl;
	uint8_t	 reason_code;		/* BA_RJT_REASON_CODE_* values */
#define BA_RJT_REASON_CODE_INVALID_COMMAND	0x1
#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM	0x9
	uint8_t	 reserved;
} __packed;
566
/*
 * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
 * The ABTS response to the ABTS received is sent by the
 * target driver to the ISP 24xx.
 * The IOCB is placed on the request queue.
 *
 * Hardware-defined layout; __packed, do not reorder fields.
 */
struct abts_resp_to_24xx {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	uint32_t handle;		/* driver-chosen completion handle */
	uint16_t reserved_1;
	uint16_t nport_handle;
	uint16_t control_flags;		/* e.g. ABTS_CONTR_FLG_TERM_EXCHG */
#define ABTS_CONTR_FLG_TERM_EXCHG	BIT_0
	uint8_t	 vp_index;
	uint8_t	 reserved_3:4;
	uint8_t	 sof_type:4;
	uint32_t exchange_address;
	struct fcp_hdr_le fcp_hdr_le;	/* echoed FC frame header */
	union {
		struct ba_acc_le ba_acct;	/* accept payload */
		struct ba_rjt_le ba_rjt;	/* reject payload */
	} __packed payload;
	uint32_t reserved_4;
	uint32_t exchange_addr_to_abort;
} __packed;
595
/*
 * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
 * The ABTS response with completion status to the ABTS response
 * (sent by the target driver to the ISP 24xx) is sent by the
 * ISP24xx firmware to the target driver.
 * The IOCB is placed on the response queue.
 *
 * Hardware-defined layout; __packed, do not reorder fields.
 */
struct abts_resp_from_24xx_fw {
	uint8_t	 entry_type;		/* Entry type. */
	uint8_t	 entry_count;		/* Entry count. */
	uint8_t	 sys_define;		/* System defined. */
	uint8_t	 entry_status;		/* Entry Status. */
	uint32_t handle;		/* handle from abts_resp_to_24xx */
	uint16_t compl_status;		/* ABTS_RESP_COMPL_* values */
#define ABTS_RESP_COMPL_SUCCESS		0
#define ABTS_RESP_COMPL_SUBCODE_ERROR	0x31
	uint16_t nport_handle;
	uint16_t reserved_1;
	uint8_t	 reserved_2;
	uint8_t	 reserved_3:4;
	uint8_t	 sof_type:4;
	uint32_t exchange_address;
	struct fcp_hdr_le fcp_hdr_le;
	uint8_t	 reserved_4[8];
	uint32_t error_subcode1;	/* valid when compl_status is an error */
#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM	0x1E
	uint32_t error_subcode2;
	uint32_t exchange_addr_to_abort;
} __packed;
625
626/********************************************************************\
627 * Type Definitions used by initiator & target halves
628\********************************************************************/
629
630struct qla_tgt_mgmt_cmd;
631struct qla_tgt_sess;
632
/*
 * This structure provides a template of function calls that the
 * target driver (from within qla_target.c) can issue to the
 * target module (tcm_qla2xxx).
 */
struct qla_tgt_func_tmpl {

	/* Submit a newly arrived SCSI command to the fabric module. */
	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
			unsigned char *, uint32_t, int, int, int);
	/* All FCP WRITE data for the command has arrived (or failed). */
	int (*handle_data)(struct qla_tgt_cmd *);
	/* Dispatch a task management request. */
	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
			uint32_t);
	/* Release hooks; called with hardware_lock held by qla_target.c. */
	void (*free_cmd)(struct qla_tgt_cmd *);
	void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
	void (*free_session)(struct qla_tgt_sess *);

	/* Session/ACL management callbacks. */
	int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
					void *, uint8_t *, uint16_t);
	struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
						const uint16_t);
	struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
						const uint8_t *);
	void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
	void (*put_sess)(struct qla_tgt_sess *);
	void (*shutdown_sess)(struct qla_tgt_sess *);
};
659
660int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
661
662#include <target/target_core_base.h>
663
664#define QLA_TGT_TIMEOUT 10 /* in seconds */
665
666#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */
667
668/* Immediate notify status constants */
669#define IMM_NTFY_LIP_RESET 0x000E
670#define IMM_NTFY_LIP_LINK_REINIT 0x000F
671#define IMM_NTFY_IOCB_OVERFLOW 0x0016
672#define IMM_NTFY_ABORT_TASK 0x0020
673#define IMM_NTFY_PORT_LOGOUT 0x0029
674#define IMM_NTFY_PORT_CONFIG 0x002A
675#define IMM_NTFY_GLBL_TPRLO 0x002D
676#define IMM_NTFY_GLBL_LOGO 0x002E
677#define IMM_NTFY_RESOURCE 0x0034
678#define IMM_NTFY_MSG_RX 0x0036
679#define IMM_NTFY_SRR 0x0045
680#define IMM_NTFY_ELS 0x0046
681
682/* Immediate notify task flags */
683#define IMM_NTFY_TASK_MGMT_SHIFT 8
684
685#define QLA_TGT_CLEAR_ACA 0x40
686#define QLA_TGT_TARGET_RESET 0x20
687#define QLA_TGT_LUN_RESET 0x10
688#define QLA_TGT_CLEAR_TS 0x04
689#define QLA_TGT_ABORT_TS 0x02
690#define QLA_TGT_ABORT_ALL_SESS 0xFFFF
691#define QLA_TGT_ABORT_ALL 0xFFFE
692#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
693#define QLA_TGT_NEXUS_LOSS 0xFFFC
694
695/* Notify Acknowledge flags */
696#define NOTIFY_ACK_RES_COUNT BIT_8
697#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
698#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
699
700/* Command's states */
701#define QLA_TGT_STATE_NEW 0 /* New command + target processing */
702#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
703#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
704#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
705#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
706
707/* Special handles */
708#define QLA_TGT_NULL_HANDLE 0
709#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
710
711/* ATIO task_codes field */
712#define ATIO_SIMPLE_QUEUE 0
713#define ATIO_HEAD_OF_QUEUE 1
714#define ATIO_ORDERED_QUEUE 2
715#define ATIO_ACA_QUEUE 4
716#define ATIO_UNTAGGED 5
717
718/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
719#define FC_TM_SUCCESS 0
720#define FC_TM_BAD_FCP_DATA 1
721#define FC_TM_BAD_CMD 2
722#define FC_TM_FCP_DATA_MISMATCH 3
723#define FC_TM_REJECT 4
724#define FC_TM_FAILED 5
725
726/*
727 * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
728 * terminated, so no more actions is needed and success should be returned
729 * to target.
730 */
731#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
732
#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
/* Split a DMA address into 32-bit halves for the IOCB address fields.
 * Arguments are fully parenthesized to avoid operator-precedence bugs
 * when callers pass expressions (CERT PRE01-C). */
#define pci_dma_lo32(a) ((a) & 0xffffffff)
/* Double 16-bit shift keeps this well-defined when dma_addr_t is 32 bits. */
#define pci_dma_hi32(a) ((((a) >> 16) >> 16) & 0xffffffff)
#else
#define pci_dma_lo32(a) ((a) & 0xffffffff)
#define pci_dma_hi32(a) 0
#endif
740
/* True when @sense is non-NULL and its response code looks like fixed or
 * descriptor sense data (0x70-0x7F family).  @sense is parenthesized in
 * every expansion so expression arguments parse correctly (CERT PRE01-C). */
#define QLA_TGT_SENSE_VALID(sense) (((sense) != NULL) && \
			      (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
743
/* Minimal per-port identity: WWPN plus the firmware loop id. */
struct qla_port_24xx_data {
	uint8_t port_name[WWN_SIZE];	/* world-wide port name */
	uint16_t loop_id;		/* firmware N_Port handle */
	uint16_t reserved;
};
749
/* Per-HBA (per scsi_qla_host) target mode state. */
struct qla_tgt {
	struct scsi_qla_host *vha;	/* owning SCSI host */
	struct qla_hw_data *ha;		/* shared HW state */

	/*
	 * To sync between IRQ handlers and qlt_target_release(). Needed,
	 * because req_pkt() can drop/reacquire HW lock inside. Protected by
	 * HW lock.
	 */
	int irq_cmd_count;

	/* DMA segment limits derived from 32/64-bit addressing mode. */
	int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;

	/* Target's flags, serialized by pha->hardware_lock */
	unsigned int tgt_enable_64bit_addr:1;	/* 64-bits PCI addr enabled */
	unsigned int link_reinit_iocb_pending:1;

	/*
	 * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
	 * OR hardware_lock for reading.
	 */
	int tgt_stop;		/* the target mode driver is being stopped */
	int tgt_stopped;	/* the target mode driver has been stopped */

	/* Count of sessions referring qla_tgt. Protected by hardware_lock. */
	int sess_count;

	/* Protected by hardware_lock. Addition also protected by tgt_mutex. */
	struct list_head sess_list;

	/* Protected by hardware_lock */
	struct list_head del_sess_list;		/* sessions pending deletion */
	struct delayed_work sess_del_work;

	/* Deferred session work items, guarded by sess_work_lock. */
	spinlock_t sess_work_lock;
	struct list_head sess_works_list;
	struct work_struct sess_work;

	struct imm_ntfy_from_isp link_reinit_iocb; /* saved LIP-reinit IOCB */
	wait_queue_head_t waitQ;
	int notify_ack_expected;	/* outstanding NOTIFY_ACK IOCBs */
	int abts_resp_expected;		/* outstanding ABTS responses */
	int modify_lun_expected;

	/* SRR (sequence retransmission request) correlation state,
	 * guarded by srr_lock. */
	int ctio_srr_id;
	int imm_srr_id;
	spinlock_t srr_lock;
	struct list_head srr_ctio_list;
	struct list_head srr_imm_list;
	struct work_struct srr_work;

	atomic_t tgt_global_resets_count;

	struct list_head tgt_list_entry;	/* global target list linkage */
};
805
/*
 * Equivalent to IT Nexus (Initiator-Target)
 */
struct qla_tgt_sess {
	uint16_t loop_id;	/* firmware handle for the initiator */
	port_id_t s_id;		/* initiator FC source id */

	unsigned int conf_compl_supported:1; /* confirmed completion support */
	unsigned int deleted:1;		/* on del_sess_list */
	unsigned int local:1;
	unsigned int tearing_down:1;	/* shutdown_session() in progress */

	struct se_session *se_sess;	/* TCM session handle */
	struct scsi_qla_host *vha;
	struct qla_tgt *tgt;

	struct list_head sess_list_entry;	/* qla_tgt.sess_list linkage */
	unsigned long expires;		/* deadline for deferred deletion */
	struct list_head del_list_entry;

	uint8_t port_name[WWN_SIZE];	/* initiator WWPN */
	struct work_struct free_work;
};
829
/* Per-SCSI-command state tying a firmware ATIO exchange to a TCM se_cmd. */
struct qla_tgt_cmd {
	struct qla_tgt_sess *sess;	/* owning I_T nexus */
	int state;			/* QLA_TGT_STATE_* */
	struct se_cmd se_cmd;		/* embedded TCM descriptor */
	struct work_struct free_work;
	struct work_struct work;
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];

	/* to save extra sess dereferences */
	unsigned int conf_compl_supported:1;
	unsigned int sg_mapped:1;	/* SG list is DMA-mapped */
	unsigned int free_sg:1;		/* kfree(sg) on release */
	unsigned int aborted:1;		/* Needed in case of SRR */
	unsigned int write_data_transferred:1;

	struct scatterlist *sg;	/* cmd data buffer SG vector */
	int sg_cnt;		/* SG segments count */
	int bufflen;		/* cmd buffer length */
	int offset;
	uint32_t tag;		/* exchange tag reported to TCM */
	uint32_t unpacked_lun;
	enum dma_data_direction dma_data_direction;

	uint16_t loop_id;	/* to save extra sess dereferences */
	struct qla_tgt *tgt;	/* to save extra sess dereferences */
	struct scsi_qla_host *vha;
	struct list_head cmd_list;

	struct atio_from_isp atio;	/* original ATIO IOCB, kept for SRR */
};
861
/* Deferred-work item queued on qla_tgt.sess_works_list; carries a copy of
 * the triggering IOCB so it can be processed outside IRQ context. */
struct qla_tgt_sess_work_param {
	struct list_head sess_works_list_entry;

#define QLA_TGT_SESS_WORK_ABORT	1
#define QLA_TGT_SESS_WORK_TM	2
	int type;	/* QLA_TGT_SESS_WORK_* — selects the union member */

	union {
		struct abts_recv_from_24xx abts;	/* for _ABORT */
		struct imm_ntfy_from_isp tm_iocb;	/* for _TM (immed notify) */
		struct atio_from_isp tm_iocb2;		/* for _TM (ATIO) */
	};
};
875
/* Task-management command state (LUN reset, aborts, ...). */
struct qla_tgt_mgmt_cmd {
	uint8_t tmr_func;	/* requested TM function */
	uint8_t fc_tm_rsp;	/* FC_TM_* response code */
	struct qla_tgt_sess *sess;
	struct se_cmd se_cmd;	/* embedded TCM descriptor */
	struct work_struct free_work;
	unsigned int flags;	/* QLA24XX_MGMT_* */
#define QLA24XX_MGMT_SEND_NACK	1
	/* Copy of the IOCB that initiated this TM, needed for the reply. */
	union {
		struct atio_from_isp atio;
		struct imm_ntfy_from_isp imm_ntfy;
		struct abts_recv_from_24xx abts;
	} __packed orig_iocb;
};
890
/* Scratch parameter block used while building response IOCBs. */
struct qla_tgt_prm {
	struct qla_tgt_cmd *cmd;
	struct qla_tgt *tgt;
	void *pkt;			/* IOCB being constructed */
	struct scatterlist *sg;	/* cmd data buffer SG vector */
	int seg_cnt;
	int req_cnt;			/* request-queue entries needed */
	uint16_t rq_result;		/* SCSI status for the response */
	uint16_t scsi_status;
	unsigned char *sense_buffer;
	int sense_buffer_len;
	int residual;			/* under/overrun byte count */
	int add_status_pkt;		/* nonzero: extra status IOCB needed */
};
905
/* Pending SRR immediate-notify, queued on qla_tgt.srr_imm_list and
 * matched to a CTIO-side entry by srr_id. */
struct qla_tgt_srr_imm {
	struct list_head srr_list_entry;
	int srr_id;
	struct imm_ntfy_from_isp imm_ntfy;
};

/* CTIO-side half of an SRR, queued on qla_tgt.srr_ctio_list. */
struct qla_tgt_srr_ctio {
	struct list_head srr_list_entry;
	int srr_id;
	struct qla_tgt_cmd *cmd;
};
917
918#define QLA_TGT_XMIT_DATA 1
919#define QLA_TGT_XMIT_STATUS 2
920#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
921
922
923extern struct qla_tgt_data qla_target;
924/*
925 * Internal function prototypes
926 */
927void qlt_disable_vha(struct scsi_qla_host *);
928
929/*
930 * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
931 */
932extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
933extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
934extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
935 int (*callback)(struct scsi_qla_host *), void *);
936extern void qlt_lport_deregister(struct scsi_qla_host *);
937extern void qlt_unreg_sess(struct qla_tgt_sess *);
938extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
939extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
940extern void qlt_set_mode(struct scsi_qla_host *ha);
941extern void qlt_clear_mode(struct scsi_qla_host *ha);
942extern int __init qlt_init(void);
943extern void qlt_exit(void);
944extern void qlt_update_vp_map(struct scsi_qla_host *, int);
945
946/*
947 * This macro is used during early initializations when host->active_mode
948 * is not set. Right now, ha value is ignored.
949 */
950#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
951
952static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
953{
954 return ha->host->active_mode & MODE_TARGET;
955}
956
957static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
958{
959 return ha->host->active_mode & MODE_INITIATOR;
960}
961
962static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
963{
964 if (ha->host->active_mode & MODE_INITIATOR)
965 ha->host->active_mode &= ~MODE_INITIATOR;
966 else
967 ha->host->active_mode |= MODE_INITIATOR;
968}
969
970/*
971 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
972 */
973extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
974 struct atio_from_isp *);
975extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
976extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
977extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
978extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
979extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
980extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
981extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
982extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
983extern void qlt_enable_vha(struct scsi_qla_host *);
984extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
985extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
986extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
987extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
988extern void qlt_24xx_config_rings(struct scsi_qla_host *,
989 device_reg_t __iomem *);
990extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
991 struct nvram_24xx *);
992extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
993 struct init_cb_24xx *);
994extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
995 struct sts_entry_24xx *);
996extern void qlt_modify_vp_config(struct scsi_qla_host *,
997 struct vp_config_entry_24xx *);
998extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
999extern int qlt_mem_alloc(struct qla_hw_data *);
1000extern void qlt_mem_free(struct qla_hw_data *);
1001extern void qlt_stop_phase1(struct qla_tgt *);
1002extern void qlt_stop_phase2(struct qla_tgt *);
1003
1004#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 000000000000..6e64314dbbb3
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1919 @@
1/*******************************************************************************
2 * This file contains tcm implementation using v4 configfs fabric infrastructure
3 * for QLogic target mode HBAs
4 *
5 * (c) Copyright 2010-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL)
8 * version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
13 * the TCM_FC / Open-FCoE.org fabric module.
14 *
15 * Copyright (c) 2010 Cisco Systems, Inc
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 ****************************************************************************/
27
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <generated/utsrelease.h>
32#include <linux/utsname.h>
33#include <linux/init.h>
34#include <linux/list.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
41#include <linux/string.h>
42#include <linux/ctype.h>
43#include <asm/unaligned.h>
44#include <scsi/scsi.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_device.h>
47#include <scsi/scsi_cmnd.h>
48#include <target/target_core_base.h>
49#include <target/target_core_fabric.h>
50#include <target/target_core_fabric_configfs.h>
51#include <target/target_core_configfs.h>
52#include <target/configfs_macros.h>
53
54#include "qla_def.h"
55#include "qla_target.h"
56#include "tcm_qla2xxx.h"
57
58struct workqueue_struct *tcm_qla2xxx_free_wq;
59struct workqueue_struct *tcm_qla2xxx_cmd_wq;
60
/* Always-true TPG predicate used for configfs attribute callbacks. */
static int tcm_qla2xxx_check_true(struct se_portal_group *unused_tpg)
{
	return 1;
}
65
/* Always-false TPG predicate used for configfs attribute callbacks. */
static int tcm_qla2xxx_check_false(struct se_portal_group *unused_tpg)
{
	return 0;
}
70
71/*
72 * Parse WWN.
73 * If strict, we require lower-case hex and colon separators to be sure
74 * the name is the same as what would be generated by ft_format_wwn()
75 * so the name and wwn are mapped one-to-one.
76 */
77static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
78{
79 const char *cp;
80 char c;
81 u32 nibble;
82 u32 byte = 0;
83 u32 pos = 0;
84 u32 err;
85
86 *wwn = 0;
87 for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
88 c = *cp;
89 if (c == '\n' && cp[1] == '\0')
90 continue;
91 if (strict && pos++ == 2 && byte++ < 7) {
92 pos = 0;
93 if (c == ':')
94 continue;
95 err = 1;
96 goto fail;
97 }
98 if (c == '\0') {
99 err = 2;
100 if (strict && byte != 8)
101 goto fail;
102 return cp - name;
103 }
104 err = 3;
105 if (isdigit(c))
106 nibble = c - '0';
107 else if (isxdigit(c) && (islower(c) || !strict))
108 nibble = tolower(c) - 'a' + 10;
109 else
110 goto fail;
111 *wwn = (*wwn << 4) | nibble;
112 }
113 err = 4;
114fail:
115 pr_debug("err %u len %zu pos %u byte %u\n",
116 err, cp - name, pos, byte);
117 return -1;
118}
119
120static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
121{
122 u8 b[8];
123
124 put_unaligned_be64(wwn, b);
125 return snprintf(buf, len,
126 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
127 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
128}
129
/* Fabric name reported to TCM for the physical-port fabric. */
static char *tcm_qla2xxx_get_fabric_name(void)
{
	static char fabric_name[] = "qla2xxx";

	return fabric_name;
}
134
135/*
136 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
137 */
138static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
139{
140 unsigned int i, j;
141 u8 wwn[8];
142
143 memset(wwn, 0, sizeof(wwn));
144
145 /* Validate and store the new name */
146 for (i = 0, j = 0; i < 16; i++) {
147 int value;
148
149 value = hex_to_bin(*ns++);
150 if (value >= 0)
151 j = (j << 4) | value;
152 else
153 return -EINVAL;
154
155 if (i % 2) {
156 wwn[i/2] = j & 0xff;
157 j = 0;
158 }
159 }
160
161 *nm = wwn_to_u64(wwn);
162 return 0;
163}
164
165/*
166 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
167 * store_fc_host_vport_create()
168 */
169static int tcm_qla2xxx_npiv_parse_wwn(
170 const char *name,
171 size_t count,
172 u64 *wwpn,
173 u64 *wwnn)
174{
175 unsigned int cnt = count;
176 int rc;
177
178 *wwpn = 0;
179 *wwnn = 0;
180
181 /* count may include a LF at end of string */
182 if (name[cnt-1] == '\n')
183 cnt--;
184
185 /* validate we have enough characters for WWPN */
186 if ((cnt != (16+1+16)) || (name[16] != ':'))
187 return -EINVAL;
188
189 rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
190 if (rc != 0)
191 return rc;
192
193 rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
194 if (rc != 0)
195 return rc;
196
197 return 0;
198}
199
200static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
201 u64 wwpn, u64 wwnn)
202{
203 u8 b[8], b2[8];
204
205 put_unaligned_be64(wwpn, b);
206 put_unaligned_be64(wwnn, b2);
207 return snprintf(buf, len,
208 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
209 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
210 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
211 b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
212}
213
/* Fabric name reported to TCM for the NPIV (virtual port) fabric. */
static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
	static char npiv_fabric_name[] = "qla2xxx_npiv";

	return npiv_fabric_name;
}
218
219static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
220{
221 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
222 struct tcm_qla2xxx_tpg, se_tpg);
223 struct tcm_qla2xxx_lport *lport = tpg->lport;
224 u8 proto_id;
225
226 switch (lport->lport_proto_id) {
227 case SCSI_PROTOCOL_FCP:
228 default:
229 proto_id = fc_get_fabric_proto_ident(se_tpg);
230 break;
231 }
232
233 return proto_id;
234}
235
236static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
237{
238 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
239 struct tcm_qla2xxx_tpg, se_tpg);
240 struct tcm_qla2xxx_lport *lport = tpg->lport;
241
242 return &lport->lport_name[0];
243}
244
245static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
246{
247 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
248 struct tcm_qla2xxx_tpg, se_tpg);
249 struct tcm_qla2xxx_lport *lport = tpg->lport;
250
251 return &lport->lport_npiv_name[0];
252}
253
254static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
255{
256 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
257 struct tcm_qla2xxx_tpg, se_tpg);
258 return tpg->lport_tpgt;
259}
260
261static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
262{
263 return 1;
264}
265
266static u32 tcm_qla2xxx_get_pr_transport_id(
267 struct se_portal_group *se_tpg,
268 struct se_node_acl *se_nacl,
269 struct t10_pr_registration *pr_reg,
270 int *format_code,
271 unsigned char *buf)
272{
273 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
274 struct tcm_qla2xxx_tpg, se_tpg);
275 struct tcm_qla2xxx_lport *lport = tpg->lport;
276 int ret = 0;
277
278 switch (lport->lport_proto_id) {
279 case SCSI_PROTOCOL_FCP:
280 default:
281 ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
282 format_code, buf);
283 break;
284 }
285
286 return ret;
287}
288
289static u32 tcm_qla2xxx_get_pr_transport_id_len(
290 struct se_portal_group *se_tpg,
291 struct se_node_acl *se_nacl,
292 struct t10_pr_registration *pr_reg,
293 int *format_code)
294{
295 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
296 struct tcm_qla2xxx_tpg, se_tpg);
297 struct tcm_qla2xxx_lport *lport = tpg->lport;
298 int ret = 0;
299
300 switch (lport->lport_proto_id) {
301 case SCSI_PROTOCOL_FCP:
302 default:
303 ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
304 format_code);
305 break;
306 }
307
308 return ret;
309}
310
311static char *tcm_qla2xxx_parse_pr_out_transport_id(
312 struct se_portal_group *se_tpg,
313 const char *buf,
314 u32 *out_tid_len,
315 char **port_nexus_ptr)
316{
317 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
318 struct tcm_qla2xxx_tpg, se_tpg);
319 struct tcm_qla2xxx_lport *lport = tpg->lport;
320 char *tid = NULL;
321
322 switch (lport->lport_proto_id) {
323 case SCSI_PROTOCOL_FCP:
324 default:
325 tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
326 port_nexus_ptr);
327 break;
328 }
329
330 return tid;
331}
332
333static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
334{
335 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
336 struct tcm_qla2xxx_tpg, se_tpg);
337
338 return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
339}
340
341static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
342{
343 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
344 struct tcm_qla2xxx_tpg, se_tpg);
345
346 return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
347}
348
349static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
350{
351 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
352 struct tcm_qla2xxx_tpg, se_tpg);
353
354 return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
355}
356
357static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
358{
359 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
360 struct tcm_qla2xxx_tpg, se_tpg);
361
362 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
363}
364
365static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
366 struct se_portal_group *se_tpg)
367{
368 struct tcm_qla2xxx_nacl *nacl;
369
370 nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
371 if (!nacl) {
372 pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
373 return NULL;
374 }
375
376 return &nacl->se_node_acl;
377}
378
379static void tcm_qla2xxx_release_fabric_acl(
380 struct se_portal_group *se_tpg,
381 struct se_node_acl *se_nacl)
382{
383 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
384 struct tcm_qla2xxx_nacl, se_node_acl);
385 kfree(nacl);
386}
387
388static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
389{
390 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
391 struct tcm_qla2xxx_tpg, se_tpg);
392
393 return tpg->lport_tpgt;
394}
395
396static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
397{
398 struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
399 struct qla_tgt_mgmt_cmd, free_work);
400
401 transport_generic_free_cmd(&mcmd->se_cmd, 0);
402}
403
404/*
405 * Called from qla_target_template->free_mcmd(), and will call
406 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
407 * release callback. qla_hw_data->hardware_lock is expected to be held
408 */
409static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
410{
411 INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
412 queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
413}
414
415static void tcm_qla2xxx_complete_free(struct work_struct *work)
416{
417 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
418
419 transport_generic_free_cmd(&cmd->se_cmd, 0);
420}
421
422/*
423 * Called from qla_target_template->free_cmd(), and will call
424 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
425 * release callback. qla_hw_data->hardware_lock is expected to be held
426 */
427static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
428{
429 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
430 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
431}
432
433/*
434 * Called from struct target_core_fabric_ops->check_stop_free() context
435 */
436static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
437{
438 return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
439}
440
441/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
442 * fabric descriptor @se_cmd command to release
443 */
444static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
445{
446 struct qla_tgt_cmd *cmd;
447
448 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
449 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
450 struct qla_tgt_mgmt_cmd, se_cmd);
451 qlt_free_mcmd(mcmd);
452 return;
453 }
454
455 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
456 qlt_free_cmd(cmd);
457}
458
/*
 * TCM ->shutdown_session() hook.  Marks the qla_tgt session as tearing
 * down and splices its outstanding command list, all under hardware_lock
 * so IRQ-context completion handlers observe a consistent state.
 * Returns 1 to tell TCM the fabric drives the remaining shutdown.
 */
static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	sess->tearing_down = 1;
	target_splice_sess_cmd_list(se_sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return 1;
}
475
/*
 * TCM ->close_session() hook.  Unregisters the qla_tgt session under
 * hardware_lock, which qlt_unreg_sess() expects to be held.
 */
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_unreg_sess(sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
489
490static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
491{
492 return 0;
493}
494
495/*
496 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
497 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
498 * that data is coming from the target (eg handling a READ). However,
499 * this is just the opposite of what we have to tell the DMA mapping
500 * layer -- eg when handling a READ, the HBA will have to DMA the data
501 * out of memory so it can send it to the initiator, which means we
502 * need to use DMA_TO_DEVICE when we map the data.
503 */
504static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
505{
506 if (se_cmd->se_cmd_flags & SCF_BIDI)
507 return DMA_BIDIRECTIONAL;
508
509 switch (se_cmd->data_direction) {
510 case DMA_TO_DEVICE:
511 return DMA_FROM_DEVICE;
512 case DMA_FROM_DEVICE:
513 return DMA_TO_DEVICE;
514 case DMA_NONE:
515 default:
516 return DMA_NONE;
517 }
518}
519
520static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
521{
522 struct qla_tgt_cmd *cmd = container_of(se_cmd,
523 struct qla_tgt_cmd, se_cmd);
524
525 cmd->bufflen = se_cmd->data_length;
526 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
527
528 cmd->sg_cnt = se_cmd->t_data_nents;
529 cmd->sg = se_cmd->t_data_sg;
530
531 /*
532 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
533 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
534 */
535 return qlt_rdy_to_xfer(cmd);
536}
537
/*
 * TCM ->write_pending_status() hook.  Always returns 0; its side effect
 * is to block briefly while hardware posts CTIO aborts for a command
 * still in WRITE_PENDING state.
 */
static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
{
	unsigned long flags;
	/*
	 * Check for WRITE_PENDING status to determine if we need to wait for
	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
	 */
	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
		/* Completion is signalled from tcm_qla2xxx_handle_data();
		 * 3000 is in jiffies (~3s at HZ=1000) — TODO confirm units */
		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
						3000);
		return 0;
	}
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

	return 0;
}
557
/* No qla2xxx-specific default node attributes to initialize. */
static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
{
}
562
563static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
564{
565 struct qla_tgt_cmd *cmd = container_of(se_cmd,
566 struct qla_tgt_cmd, se_cmd);
567
568 return cmd->tag;
569}
570
/* No fabric-private command state is exposed to TCM. */
static int tcm_qla2xxx_get_cmd_state(struct se_cmd *unused_cmd)
{
	return 0;
}
575
576/*
577 * Called from process context in qla_target.c:qlt_do_work() code
578 */
579static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
580 unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
581 int data_dir, int bidi)
582{
583 struct se_cmd *se_cmd = &cmd->se_cmd;
584 struct se_session *se_sess;
585 struct qla_tgt_sess *sess;
586 int flags = TARGET_SCF_ACK_KREF;
587
588 if (bidi)
589 flags |= TARGET_SCF_BIDI_OP;
590
591 sess = cmd->sess;
592 if (!sess) {
593 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
594 return -EINVAL;
595 }
596
597 se_sess = sess->se_sess;
598 if (!se_sess) {
599 pr_err("Unable to locate active struct se_session\n");
600 return -EINVAL;
601 }
602
603 target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
604 cmd->unpacked_lun, data_length, fcp_task_attr,
605 data_dir, flags);
606 return 0;
607}
608
609static void tcm_qla2xxx_do_rsp(struct work_struct *work)
610{
611 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
612 /*
613 * Dispatch ->queue_status from workqueue process context
614 */
615 transport_generic_request_failure(&cmd->se_cmd);
616}
617
618/*
619 * Called from qla_target.c:qlt_do_ctio_completion()
620 */
static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	/*
	 * Ensure that the complete FCP WRITE payload has been received.
	 * Otherwise return an exception via CHECK_CONDITION status.
	 */
	if (!cmd->write_data_transferred) {
		/*
		 * Check if se_cmd has already been aborted via LUN_RESET, and
		 * waiting upon completion in tcm_qla2xxx_write_pending_status()
		 */
		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		if (se_cmd->transport_state & CMD_T_ABORTED) {
			spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
			/* Wake the waiter in tcm_qla2xxx_write_pending_status() */
			complete(&se_cmd->t_transport_stop_comp);
			return 0;
		}
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		/*
		 * Not aborted, but data incomplete: fail the command via a
		 * CHECK_CONDITION dispatched from workqueue process context.
		 */
		se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
		INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
		queue_work(tcm_qla2xxx_free_wq, &cmd->work);
		return 0;
	}
	/*
	 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
	 * status to the backstore processing thread.
	 */
	return transport_generic_handle_data(&cmd->se_cmd);
}
653
654/*
655 * Called from qla_target.c:qlt_issue_task_mgmt()
656 */
657static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
658 uint8_t tmr_func, uint32_t tag)
659{
660 struct qla_tgt_sess *sess = mcmd->sess;
661 struct se_cmd *se_cmd = &mcmd->se_cmd;
662
663 return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
664 tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
665}
666
667static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
668{
669 struct qla_tgt_cmd *cmd = container_of(se_cmd,
670 struct qla_tgt_cmd, se_cmd);
671
672 cmd->bufflen = se_cmd->data_length;
673 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
674 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
675
676 cmd->sg_cnt = se_cmd->t_data_nents;
677 cmd->sg = se_cmd->t_data_sg;
678 cmd->offset = 0;
679
680 /*
681 * Now queue completed DATA_IN the qla2xxx LLD and response ring
682 */
683 return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
684 se_cmd->scsi_status);
685}
686
687static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
688{
689 struct qla_tgt_cmd *cmd = container_of(se_cmd,
690 struct qla_tgt_cmd, se_cmd);
691 int xmit_type = QLA_TGT_XMIT_STATUS;
692
693 cmd->bufflen = se_cmd->data_length;
694 cmd->sg = NULL;
695 cmd->sg_cnt = 0;
696 cmd->offset = 0;
697 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
698 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
699
700 if (se_cmd->data_direction == DMA_FROM_DEVICE) {
701 /*
702 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
703 * for qla_tgt_xmit_response LLD code
704 */
705 se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
706 se_cmd->residual_count = se_cmd->data_length;
707
708 cmd->bufflen = 0;
709 }
710 /*
711 * Now queue status response to qla2xxx LLD code and response ring
712 */
713 return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
714}
715
716static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
717{
718 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
719 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
720 struct qla_tgt_mgmt_cmd, se_cmd);
721
722 pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
723 mcmd, se_tmr->function, se_tmr->response);
724 /*
725 * Do translation between TCM TM response codes and
726 * QLA2xxx FC TM response codes.
727 */
728 switch (se_tmr->response) {
729 case TMR_FUNCTION_COMPLETE:
730 mcmd->fc_tm_rsp = FC_TM_SUCCESS;
731 break;
732 case TMR_TASK_DOES_NOT_EXIST:
733 mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
734 break;
735 case TMR_FUNCTION_REJECTED:
736 mcmd->fc_tm_rsp = FC_TM_REJECT;
737 break;
738 case TMR_LUN_DOES_NOT_EXIST:
739 default:
740 mcmd->fc_tm_rsp = FC_TM_FAILED;
741 break;
742 }
743 /*
744 * Queue the TM response to QLA2xxx LLD to build a
745 * CTIO response packet.
746 */
747 qlt_xmit_tm_rsp(mcmd);
748
749 return 0;
750}
751
static u16 tcm_qla2xxx_get_fabric_sense_len(void)
{
	/* qla2xxx adds no fabric-specific sense header; extra length is 0 */
	return 0;
}
756
static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
	u32 sense_length)
{
	/* No fabric sense padding to apply; offset into sense buffer is 0 */
	return 0;
}
762
/* Local pointer to allocated TCM configfs fabric module */
struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;

/* Forward declaration: defined later, used by tcm_qla2xxx_clear_nacl_from_fcport_map() */
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
			struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
769/*
770 * Expected to be called with struct qla_hw_data->hardware_lock held
771 */
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
				struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
				struct tcm_qla2xxx_nacl, se_node_acl);
	void *node;

	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);

	/* A map entry pointing at a different nacl indicates corruption */
	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
	WARN_ON(node && (node != se_nacl));

	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
	/*
	 * Now clear the se_nacl and session pointers from our HW lport lookup
	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
	 *
	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
	 * target_wait_for_sess_cmds() before the session waits for outstanding
	 * I/O to complete, to avoid a race between session shutdown execution
	 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
	 */
	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
801
802static void tcm_qla2xxx_release_session(struct kref *kref)
803{
804 struct se_session *se_sess = container_of(kref,
805 struct se_session, sess_kref);
806
807 qlt_unreg_sess(se_sess->fabric_sess_ptr);
808}
809
static void tcm_qla2xxx_put_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct qla_hw_data *ha = sess->vha->hw;
	unsigned long flags;

	/*
	 * Drop a session reference under hardware_lock; the final put invokes
	 * tcm_qla2xxx_release_session() -> qlt_unreg_sess().
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
820
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
	/* LLD-facing wrapper: drop a reference on the backing se_session */
	tcm_qla2xxx_put_session(sess->se_sess);
}
825
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
	/* LLD-facing wrapper: begin TCM-side shutdown of the se_session */
	tcm_qla2xxx_shutdown_session(sess->se_sess);
}
830
831static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
832 struct se_portal_group *se_tpg,
833 struct config_group *group,
834 const char *name)
835{
836 struct se_node_acl *se_nacl, *se_nacl_new;
837 struct tcm_qla2xxx_nacl *nacl;
838 u64 wwnn;
839 u32 qla2xxx_nexus_depth;
840
841 if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
842 return ERR_PTR(-EINVAL);
843
844 se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
845 if (!se_nacl_new)
846 return ERR_PTR(-ENOMEM);
847/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
848 qla2xxx_nexus_depth = 1;
849
850 /*
851 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
852 * when converting a NodeACL from demo mode -> explict
853 */
854 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
855 name, qla2xxx_nexus_depth);
856 if (IS_ERR(se_nacl)) {
857 tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
858 return se_nacl;
859 }
860 /*
861 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
862 */
863 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
864 nacl->nport_wwnn = wwnn;
865 tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
866
867 return se_nacl;
868}
869
870static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
871{
872 struct se_portal_group *se_tpg = se_acl->se_tpg;
873 struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
874 struct tcm_qla2xxx_nacl, se_node_acl);
875
876 core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
877 kfree(nacl);
878}
879
880/* Start items for tcm_qla2xxx_tpg_attrib_cit */
881
/*
 * DEF_QLA_TPG_ATTRIB(name): generates the configfs show/store handlers for a
 * per-TPG attribute stored in QLA_TPG_ATTRIB(tpg)->name.  The store handler
 * parses an unsigned long and defers validation to
 * tcm_qla2xxx_set_attrib_##name(), returning -EINVAL on parse or validation
 * failure and the full count on success.
 */
#define DEF_QLA_TPG_ATTRIB(name)					\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\
	struct se_portal_group *se_tpg,					\
	char *page)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
									\
	return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name);	\
}									\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
	struct se_portal_group *se_tpg,					\
	const char *page,						\
	size_t count)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(page, 0, &val);					\
	if (ret < 0) {							\
		pr_err("kstrtoul() failed with"				\
				" ret: %d\n", ret);			\
		return -EINVAL;						\
	}								\
	ret = tcm_qla2xxx_set_attrib_##name(tpg, val);			\
									\
	return (!ret) ? count : -EINVAL;				\
}
914
/*
 * DEF_QLA_TPG_ATTR_BOOL(_name): generates the setter called by the store
 * handler from DEF_QLA_TPG_ATTRIB(); accepts only 0 or 1 and writes the
 * value into tpg->tpg_attrib._name.
 */
#define DEF_QLA_TPG_ATTR_BOOL(_name)					\
									\
static int tcm_qla2xxx_set_attrib_##_name(				\
	struct tcm_qla2xxx_tpg *tpg,					\
	unsigned long val)						\
{									\
	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
									\
	if ((val != 0) && (val != 1)) {					\
		pr_err("Illegal boolean value %lu\n", val);		\
		return -EINVAL;						\
	}								\
									\
	a->_name = val;							\
	return 0;							\
}

/* Wire a named attribute into the TPG attrib configfs item type */
#define QLA_TPG_ATTR(_name, _mode) \
	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
934
935/*
936 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
937 */
938DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
939DEF_QLA_TPG_ATTRIB(generate_node_acls);
940QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
941
942/*
943 Define tcm_qla2xxx_attrib_s_cache_dynamic_acls
944 */
945DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
946DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
947QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
948
949/*
950 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
951 */
952DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
953DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
954QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
955
956/*
957 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
958 */
959DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
960DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
961QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
962
963static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
964 &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
965 &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
966 &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
967 &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
968 NULL,
969};
970
971/* End items for tcm_qla2xxx_tpg_attrib_cit */
972
973static ssize_t tcm_qla2xxx_tpg_show_enable(
974 struct se_portal_group *se_tpg,
975 char *page)
976{
977 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
978 struct tcm_qla2xxx_tpg, se_tpg);
979
980 return snprintf(page, PAGE_SIZE, "%d\n",
981 atomic_read(&tpg->lport_tpg_enabled));
982}
983
984static ssize_t tcm_qla2xxx_tpg_store_enable(
985 struct se_portal_group *se_tpg,
986 const char *page,
987 size_t count)
988{
989 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
990 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
991 struct tcm_qla2xxx_lport, lport_wwn);
992 struct scsi_qla_host *vha = lport->qla_vha;
993 struct qla_hw_data *ha = vha->hw;
994 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
995 struct tcm_qla2xxx_tpg, se_tpg);
996 unsigned long op;
997 int rc;
998
999 rc = kstrtoul(page, 0, &op);
1000 if (rc < 0) {
1001 pr_err("kstrtoul() returned %d\n", rc);
1002 return -EINVAL;
1003 }
1004 if ((op != 1) && (op != 0)) {
1005 pr_err("Illegal value for tpg_enable: %lu\n", op);
1006 return -EINVAL;
1007 }
1008
1009 if (op) {
1010 atomic_set(&tpg->lport_tpg_enabled, 1);
1011 qlt_enable_vha(vha);
1012 } else {
1013 if (!ha->tgt.qla_tgt) {
1014 pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
1015 return -ENODEV;
1016 }
1017 atomic_set(&tpg->lport_tpg_enabled, 0);
1018 qlt_stop_phase1(ha->tgt.qla_tgt);
1019 }
1020
1021 return count;
1022}
1023
/* Generate the tcm_qla2xxx_tpg_enable attribute from the show/store pair above */
TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
	&tcm_qla2xxx_tpg_enable.attr,
	NULL,
};
1030
1031static struct se_portal_group *tcm_qla2xxx_make_tpg(
1032 struct se_wwn *wwn,
1033 struct config_group *group,
1034 const char *name)
1035{
1036 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1037 struct tcm_qla2xxx_lport, lport_wwn);
1038 struct tcm_qla2xxx_tpg *tpg;
1039 unsigned long tpgt;
1040 int ret;
1041
1042 if (strstr(name, "tpgt_") != name)
1043 return ERR_PTR(-EINVAL);
1044 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1045 return ERR_PTR(-EINVAL);
1046
1047 if (!lport->qla_npiv_vp && (tpgt != 1)) {
1048 pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
1049 return ERR_PTR(-ENOSYS);
1050 }
1051
1052 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1053 if (!tpg) {
1054 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1055 return ERR_PTR(-ENOMEM);
1056 }
1057 tpg->lport = lport;
1058 tpg->lport_tpgt = tpgt;
1059 /*
1060 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1061 * NodeACLs
1062 */
1063 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
1064 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
1065 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
1066
1067 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
1068 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1069 if (ret < 0) {
1070 kfree(tpg);
1071 return NULL;
1072 }
1073 /*
1074 * Setup local TPG=1 pointer for non NPIV mode.
1075 */
1076 if (lport->qla_npiv_vp == NULL)
1077 lport->tpg_1 = tpg;
1078
1079 return &tpg->se_tpg;
1080}
1081
static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct scsi_qla_host *vha = lport->qla_vha;
	struct qla_hw_data *ha = vha->hw;
	/*
	 * Call into qla2x_target.c LLD logic to shutdown the active
	 * FC Nexuses and disable target mode operation for this qla_hw_data
	 */
	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
		qlt_stop_phase1(ha->tgt.qla_tgt);

	/* Phase2 of LLD shutdown runs later from tcm_qla2xxx_drop_lport() */
	core_tpg_deregister(se_tpg);
	/*
	 * Clear local TPG=1 pointer for non NPIV mode.
	 */
	if (lport->qla_npiv_vp == NULL)
		lport->tpg_1 = NULL;

	kfree(tpg);
}
1105
1106static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
1107 struct se_wwn *wwn,
1108 struct config_group *group,
1109 const char *name)
1110{
1111 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1112 struct tcm_qla2xxx_lport, lport_wwn);
1113 struct tcm_qla2xxx_tpg *tpg;
1114 unsigned long tpgt;
1115 int ret;
1116
1117 if (strstr(name, "tpgt_") != name)
1118 return ERR_PTR(-EINVAL);
1119 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1120 return ERR_PTR(-EINVAL);
1121
1122 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1123 if (!tpg) {
1124 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1125 return ERR_PTR(-ENOMEM);
1126 }
1127 tpg->lport = lport;
1128 tpg->lport_tpgt = tpgt;
1129
1130 ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
1131 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1132 if (ret < 0) {
1133 kfree(tpg);
1134 return NULL;
1135 }
1136 return &tpg->se_tpg;
1137}
1138
1139/*
1140 * Expected to be called with struct qla_hw_data->hardware_lock held
1141 */
1142static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1143 scsi_qla_host_t *vha,
1144 const uint8_t *s_id)
1145{
1146 struct qla_hw_data *ha = vha->hw;
1147 struct tcm_qla2xxx_lport *lport;
1148 struct se_node_acl *se_nacl;
1149 struct tcm_qla2xxx_nacl *nacl;
1150 u32 key;
1151
1152 lport = ha->tgt.target_lport_ptr;
1153 if (!lport) {
1154 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1155 dump_stack();
1156 return NULL;
1157 }
1158
1159 key = (((unsigned long)s_id[0] << 16) |
1160 ((unsigned long)s_id[1] << 8) |
1161 (unsigned long)s_id[2]);
1162 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1163
1164 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
1165 if (!se_nacl) {
1166 pr_debug("Unable to locate s_id: 0x%06x\n", key);
1167 return NULL;
1168 }
1169 pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
1170 se_nacl, se_nacl->initiatorname);
1171
1172 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1173 if (!nacl->qla_tgt_sess) {
1174 pr_err("Unable to locate struct qla_tgt_sess\n");
1175 return NULL;
1176 }
1177
1178 return nacl->qla_tgt_sess;
1179}
1180
1181/*
1182 * Expected to be called with struct qla_hw_data->hardware_lock held
1183 */
static void tcm_qla2xxx_set_sess_by_s_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint8_t *s_id)
{
	u32 key;
	void *slot;
	int rc;

	/* Pack the 3-byte big-endian S_ID into the 24-bit btree key */
	key = (((unsigned long)s_id[0] << 16) |
	    ((unsigned long)s_id[1] << 8) |
	    (unsigned long)s_id[2]);
	pr_debug("set_sess_by_s_id: %06x\n", key);

	slot = btree_lookup32(&lport->lport_fcport_map, key);
	if (!slot) {
		/* No existing mapping for this S_ID: insert (or no-op wipe) */
		if (new_se_nacl) {
			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
			nacl->nport_id = key;
			rc = btree_insert32(&lport->lport_fcport_map, key,
					new_se_nacl, GFP_ATOMIC);
			if (rc)
				printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
				    (int)key);
		} else {
			pr_debug("Wiping nonexisting fc_port entry\n");
		}

		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		/* nacl has an active session: clear or replace the mapping */
		if (new_se_nacl == NULL) {
			pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
			btree_remove32(&lport->lport_fcport_map, key);
			nacl->qla_tgt_sess = NULL;
			return;
		}
		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	/* Slot exists but nacl has no active session */
	if (new_se_nacl == NULL) {
		pr_debug("Clearing existing fc_port entry\n");
		btree_remove32(&lport->lport_fcport_map, key);
		return;
	}

	pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
	qla_tgt_sess->se_sess = se_sess;
	nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
1248
1249/*
1250 * Expected to be called with struct qla_hw_data->hardware_lock held
1251 */
1252static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
1253 scsi_qla_host_t *vha,
1254 const uint16_t loop_id)
1255{
1256 struct qla_hw_data *ha = vha->hw;
1257 struct tcm_qla2xxx_lport *lport;
1258 struct se_node_acl *se_nacl;
1259 struct tcm_qla2xxx_nacl *nacl;
1260 struct tcm_qla2xxx_fc_loopid *fc_loopid;
1261
1262 lport = ha->tgt.target_lport_ptr;
1263 if (!lport) {
1264 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1265 dump_stack();
1266 return NULL;
1267 }
1268
1269 pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1270
1271 fc_loopid = lport->lport_loopid_map + loop_id;
1272 se_nacl = fc_loopid->se_nacl;
1273 if (!se_nacl) {
1274 pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
1275 loop_id);
1276 return NULL;
1277 }
1278
1279 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1280
1281 if (!nacl->qla_tgt_sess) {
1282 pr_err("Unable to locate struct qla_tgt_sess\n");
1283 return NULL;
1284 }
1285
1286 return nacl->qla_tgt_sess;
1287}
1288
1289/*
1290 * Expected to be called with struct qla_hw_data->hardware_lock held
1291 */
static void tcm_qla2xxx_set_sess_by_loop_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint16_t loop_id)
{
	struct se_node_acl *saved_nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
			lport->lport_loopid_map)[loop_id];

	saved_nacl = fc_loopid->se_nacl;
	if (!saved_nacl) {
		/* No existing entry for this loop_id: install the new one */
		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		/* nacl has an active session: clear or replace the entry */
		if (new_se_nacl == NULL) {
			pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
			fc_loopid->se_nacl = NULL;
			nacl->qla_tgt_sess = NULL;
			return;
		}

		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	/* Entry exists but nacl has no active session */
	if (new_se_nacl == NULL) {
		pr_debug("Clearing fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = NULL;
		return;
	}

	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
	fc_loopid->se_nacl = new_se_nacl;
	if (qla_tgt_sess->se_sess != se_sess)
		qla_tgt_sess->se_sess = se_sess;
	if (nacl->qla_tgt_sess != qla_tgt_sess)
		nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
1352
1353/*
1354 * Should always be called with qla_hw_data->hardware_lock held.
1355 */
1356static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
1357 struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
1358{
1359 struct se_session *se_sess = sess->se_sess;
1360 unsigned char be_sid[3];
1361
1362 be_sid[0] = sess->s_id.b.domain;
1363 be_sid[1] = sess->s_id.b.area;
1364 be_sid[2] = sess->s_id.b.al_pa;
1365
1366 tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
1367 sess, be_sid);
1368 tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
1369 sess, sess->loop_id);
1370}
1371
static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	struct se_session *se_sess;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_lport *lport;
	struct tcm_qla2xxx_nacl *nacl;

	/* This path waits for outstanding commands; must run in process context */
	BUG_ON(in_interrupt());

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("struct qla_tgt_sess->se_sess is NULL\n");
		dump_stack();
		return;
	}
	se_nacl = se_sess->se_node_acl;
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);

	lport = ha->tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return;
	}
	/* Block until all outstanding commands on this nexus have completed */
	target_wait_for_sess_cmds(se_sess, 0);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);
}
1403
1404/*
1405 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
1406 * to locate struct se_node_acl
1407 */
1408static int tcm_qla2xxx_check_initiator_node_acl(
1409 scsi_qla_host_t *vha,
1410 unsigned char *fc_wwpn,
1411 void *qla_tgt_sess,
1412 uint8_t *s_id,
1413 uint16_t loop_id)
1414{
1415 struct qla_hw_data *ha = vha->hw;
1416 struct tcm_qla2xxx_lport *lport;
1417 struct tcm_qla2xxx_tpg *tpg;
1418 struct tcm_qla2xxx_nacl *nacl;
1419 struct se_portal_group *se_tpg;
1420 struct se_node_acl *se_nacl;
1421 struct se_session *se_sess;
1422 struct qla_tgt_sess *sess = qla_tgt_sess;
1423 unsigned char port_name[36];
1424 unsigned long flags;
1425
1426 lport = ha->tgt.target_lport_ptr;
1427 if (!lport) {
1428 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1429 dump_stack();
1430 return -EINVAL;
1431 }
1432 /*
1433 * Locate the TPG=1 reference..
1434 */
1435 tpg = lport->tpg_1;
1436 if (!tpg) {
1437 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
1438 return -EINVAL;
1439 }
1440 se_tpg = &tpg->se_tpg;
1441
1442 se_sess = transport_init_session();
1443 if (IS_ERR(se_sess)) {
1444 pr_err("Unable to initialize struct se_session\n");
1445 return PTR_ERR(se_sess);
1446 }
1447 /*
1448 * Format the FCP Initiator port_name into colon seperated values to
1449 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
1450 */
1451 memset(&port_name, 0, 36);
1452 snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1453 fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
1454 fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
1455 /*
1456 * Locate our struct se_node_acl either from an explict NodeACL created
1457 * via ConfigFS, or via running in TPG demo mode.
1458 */
1459 se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
1460 port_name);
1461 if (!se_sess->se_node_acl) {
1462 transport_free_session(se_sess);
1463 return -EINVAL;
1464 }
1465 se_nacl = se_sess->se_node_acl;
1466 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1467 /*
1468 * And now setup the new se_nacl and session pointers into our HW lport
1469 * mappings for fabric S_ID and LOOP_ID.
1470 */
1471 spin_lock_irqsave(&ha->hardware_lock, flags);
1472 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1473 qla_tgt_sess, s_id);
1474 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1475 qla_tgt_sess, loop_id);
1476 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1477 /*
1478 * Finally register the new FC Nexus with TCM
1479 */
1480 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1481
1482 return 0;
1483}
1484
1485/*
1486 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
1487 */
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
	/* I/O path: command, write-data completion and TMR dispatch into TCM */
	.handle_cmd = tcm_qla2xxx_handle_cmd,
	.handle_data = tcm_qla2xxx_handle_data,
	.handle_tmr = tcm_qla2xxx_handle_tmr,
	/* Descriptor release hooks */
	.free_cmd = tcm_qla2xxx_free_cmd,
	.free_mcmd = tcm_qla2xxx_free_mcmd,
	.free_session = tcm_qla2xxx_free_session,
	/* Session establishment, lookup, and teardown callbacks */
	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
	.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
	.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
	.put_sess = tcm_qla2xxx_put_sess,
	.shutdown_sess = tcm_qla2xxx_shutdown_sess,
};
1502
static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
{
	int rc;

	/* S_ID -> se_node_acl lookup is kept in a 32-bit keyed btree */
	rc = btree_init32(&lport->lport_fcport_map);
	if (rc) {
		pr_err("Unable to initialize lport->lport_fcport_map btree\n");
		return rc;
	}

	/*
	 * loop_id -> se_nacl lookup is a flat table covering the full 16-bit
	 * loop_id space; vmalloc is used as the table may be large.
	 */
	lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
				65536);
	if (!lport->lport_loopid_map) {
		pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
		    sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
		btree_destroy32(&lport->lport_fcport_map);
		return -ENOMEM;
	}
	memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
	    * 65536);
	pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
	    sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
	return 0;
}
1527
1528static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
1529{
1530 struct qla_hw_data *ha = vha->hw;
1531 struct tcm_qla2xxx_lport *lport;
1532 /*
1533 * Setup local pointer to vha, NPIV VP pointer (if present) and
1534 * vha->tcm_lport pointer
1535 */
1536 lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
1537 lport->qla_vha = vha;
1538
1539 return 0;
1540}
1541
static struct se_wwn *tcm_qla2xxx_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 wwpn;
	int ret = -ENODEV;

	/* The configfs group name must parse as a WWPN */
	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_wwpn = wwpn;
	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
				wwpn);

	/* Allocate the S_ID btree and loop_id lookup table */
	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	/* Hand the lport to the qla2xxx LLD; the callback fills lport->qla_vha */
	ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
				tcm_qla2xxx_lport_register_cb, lport);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	/* Unwind the tcm_qla2xxx_init_lport() allocations */
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}
1580
static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_node_acl *node;
	u32 key = 0;

	/*
	 * Call into qla2x_target.c LLD logic to complete the
	 * shutdown of struct qla_tgt after the call to
	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
	 */
	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
		qlt_stop_phase2(ha->tgt.qla_tgt);

	qlt_lport_deregister(vha);

	/* Release the lookup structures built in tcm_qla2xxx_init_lport() */
	vfree(lport->lport_loopid_map);
	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
		btree_remove32(&lport->lport_fcport_map, key);
	btree_destroy32(&lport->lport_fcport_map);
	kfree(lport);
}
1606
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 npiv_wwpn, npiv_wwnn;
	int ret;

	/* NPIV configfs group names encode both a WWPN and a WWNN */
	if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
				&npiv_wwpn, &npiv_wwnn) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_npiv_wwpn = npiv_wwpn;
	lport->lport_npiv_wwnn = npiv_wwnn;
	tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);

/* FIXME: tcm_qla2xxx_npiv_make_lport */
	/* NPIV lport creation is a deliberate placeholder: always fails -ENOSYS */
	ret = -ENOSYS;
	if (ret != 0)
		goto out;

	return &lport->lport_wwn;
out:
	kfree(lport);
	return ERR_PTR(ret);
}
1640
1641static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
1642{
1643 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1644 struct tcm_qla2xxx_lport, lport_wwn);
1645 struct scsi_qla_host *vha = lport->qla_vha;
1646 struct Scsi_Host *sh = vha->host;
1647 /*
1648 * Notify libfc that we want to release the lport->npiv_vport
1649 */
1650 fc_vport_terminate(lport->npiv_vport);
1651
1652 scsi_host_put(sh);
1653 kfree(lport);
1654}
1655
1656
1657static ssize_t tcm_qla2xxx_wwn_show_attr_version(
1658 struct target_fabric_configfs *tf,
1659 char *page)
1660{
1661 return sprintf(page,
1662 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1663 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1664 utsname()->machine);
1665}
1666
1667TF_WWN_ATTR_RO(tcm_qla2xxx, version);
1668
1669static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
1670 &tcm_qla2xxx_wwn_version.attr,
1671 NULL,
1672};
1673
1674static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1675 .get_fabric_name = tcm_qla2xxx_get_fabric_name,
1676 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
1677 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
1678 .tpg_get_tag = tcm_qla2xxx_get_tag,
1679 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
1680 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
1681 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
1682 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
1683 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
1684 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
1685 .tpg_check_demo_mode_write_protect =
1686 tcm_qla2xxx_check_demo_write_protect,
1687 .tpg_check_prod_mode_write_protect =
1688 tcm_qla2xxx_check_prod_write_protect,
1689 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
1690 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1691 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1692 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1693 .new_cmd_map = NULL,
1694 .check_stop_free = tcm_qla2xxx_check_stop_free,
1695 .release_cmd = tcm_qla2xxx_release_cmd,
1696 .put_session = tcm_qla2xxx_put_session,
1697 .shutdown_session = tcm_qla2xxx_shutdown_session,
1698 .close_session = tcm_qla2xxx_close_session,
1699 .sess_get_index = tcm_qla2xxx_sess_get_index,
1700 .sess_get_initiator_sid = NULL,
1701 .write_pending = tcm_qla2xxx_write_pending,
1702 .write_pending_status = tcm_qla2xxx_write_pending_status,
1703 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
1704 .get_task_tag = tcm_qla2xxx_get_task_tag,
1705 .get_cmd_state = tcm_qla2xxx_get_cmd_state,
1706 .queue_data_in = tcm_qla2xxx_queue_data_in,
1707 .queue_status = tcm_qla2xxx_queue_status,
1708 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1709 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1710 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1711 /*
1712 * Setup function pointers for generic logic in
1713 * target_core_fabric_configfs.c
1714 */
1715 .fabric_make_wwn = tcm_qla2xxx_make_lport,
1716 .fabric_drop_wwn = tcm_qla2xxx_drop_lport,
1717 .fabric_make_tpg = tcm_qla2xxx_make_tpg,
1718 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
1719 .fabric_post_link = NULL,
1720 .fabric_pre_unlink = NULL,
1721 .fabric_make_np = NULL,
1722 .fabric_drop_np = NULL,
1723 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
1724 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
1725};
1726
1727static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1728 .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
1729 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
1730 .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
1731 .tpg_get_tag = tcm_qla2xxx_get_tag,
1732 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
1733 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
1734 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
1735 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
1736 .tpg_check_demo_mode = tcm_qla2xxx_check_false,
1737 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
1738 .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
1739 .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
1740 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
1741 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1742 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1743 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1744 .release_cmd = tcm_qla2xxx_release_cmd,
1745 .put_session = tcm_qla2xxx_put_session,
1746 .shutdown_session = tcm_qla2xxx_shutdown_session,
1747 .close_session = tcm_qla2xxx_close_session,
1748 .sess_get_index = tcm_qla2xxx_sess_get_index,
1749 .sess_get_initiator_sid = NULL,
1750 .write_pending = tcm_qla2xxx_write_pending,
1751 .write_pending_status = tcm_qla2xxx_write_pending_status,
1752 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
1753 .get_task_tag = tcm_qla2xxx_get_task_tag,
1754 .get_cmd_state = tcm_qla2xxx_get_cmd_state,
1755 .queue_data_in = tcm_qla2xxx_queue_data_in,
1756 .queue_status = tcm_qla2xxx_queue_status,
1757 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1758 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1759 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1760 /*
1761 * Setup function pointers for generic logic in
1762 * target_core_fabric_configfs.c
1763 */
1764 .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
1765 .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
1766 .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
1767 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
1768 .fabric_post_link = NULL,
1769 .fabric_pre_unlink = NULL,
1770 .fabric_make_np = NULL,
1771 .fabric_drop_np = NULL,
1772 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
1773 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
1774};
1775
1776static int tcm_qla2xxx_register_configfs(void)
1777{
1778 struct target_fabric_configfs *fabric, *npiv_fabric;
1779 int ret;
1780
1781 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
1782 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1783 utsname()->machine);
1784 /*
1785 * Register the top level struct config_item_type with TCM core
1786 */
1787 fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
1788 if (IS_ERR(fabric)) {
1789 pr_err("target_fabric_configfs_init() failed\n");
1790 return PTR_ERR(fabric);
1791 }
1792 /*
1793 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
1794 */
1795 fabric->tf_ops = tcm_qla2xxx_ops;
1796 /*
1797 * Setup default attribute lists for various fabric->tf_cit_tmpl
1798 */
1799 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1800 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
1801 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
1802 tcm_qla2xxx_tpg_attrib_attrs;
1803 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1804 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1805 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1806 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1807 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1808 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1809 /*
1810 * Register the fabric for use within TCM
1811 */
1812 ret = target_fabric_configfs_register(fabric);
1813 if (ret < 0) {
1814 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
1815 return ret;
1816 }
1817 /*
1818 * Setup our local pointer to *fabric
1819 */
1820 tcm_qla2xxx_fabric_configfs = fabric;
1821 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
1822
1823 /*
1824 * Register the top level struct config_item_type for NPIV with TCM core
1825 */
1826 npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
1827 if (IS_ERR(npiv_fabric)) {
1828 pr_err("target_fabric_configfs_init() failed\n");
1829 ret = PTR_ERR(npiv_fabric);
1830 goto out_fabric;
1831 }
1832 /*
1833 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
1834 */
1835 npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
1836 /*
1837 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
1838 */
1839 TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1840 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
1841 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1842 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1843 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1844 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1845 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1846 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1847 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1848 /*
1849 * Register the npiv_fabric for use within TCM
1850 */
1851 ret = target_fabric_configfs_register(npiv_fabric);
1852 if (ret < 0) {
1853 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
1854 goto out_fabric;
1855 }
1856 /*
1857 * Setup our local pointer to *npiv_fabric
1858 */
1859 tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
1860 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
1861
1862 tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
1863 WQ_MEM_RECLAIM, 0);
1864 if (!tcm_qla2xxx_free_wq) {
1865 ret = -ENOMEM;
1866 goto out_fabric_npiv;
1867 }
1868
1869 tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
1870 if (!tcm_qla2xxx_cmd_wq) {
1871 ret = -ENOMEM;
1872 goto out_free_wq;
1873 }
1874
1875 return 0;
1876
1877out_free_wq:
1878 destroy_workqueue(tcm_qla2xxx_free_wq);
1879out_fabric_npiv:
1880 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
1881out_fabric:
1882 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
1883 return ret;
1884}
1885
1886static void tcm_qla2xxx_deregister_configfs(void)
1887{
1888 destroy_workqueue(tcm_qla2xxx_cmd_wq);
1889 destroy_workqueue(tcm_qla2xxx_free_wq);
1890
1891 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
1892 tcm_qla2xxx_fabric_configfs = NULL;
1893 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
1894
1895 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
1896 tcm_qla2xxx_npiv_fabric_configfs = NULL;
1897 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
1898}
1899
1900static int __init tcm_qla2xxx_init(void)
1901{
1902 int ret;
1903
1904 ret = tcm_qla2xxx_register_configfs();
1905 if (ret < 0)
1906 return ret;
1907
1908 return 0;
1909}
1910
1911static void __exit tcm_qla2xxx_exit(void)
1912{
1913 tcm_qla2xxx_deregister_configfs();
1914}
1915
1916MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
1917MODULE_LICENSE("GPL");
1918module_init(tcm_qla2xxx_init);
1919module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 000000000000..825498103352
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,82 @@
1#include <target/target_core_base.h>
2#include <linux/btree.h>
3
4#define TCM_QLA2XXX_VERSION "v0.1"
5/* length of ASCII WWPNs including pad */
6#define TCM_QLA2XXX_NAMELEN 32
 7/* length of ASCII NPIV 'WWPN+WWNN' including pad */
8#define TCM_QLA2XXX_NPIV_NAMELEN 66
9
10#include "qla_target.h"
11
12struct tcm_qla2xxx_nacl {
13 /* From libfc struct fc_rport->port_id */
14 u32 nport_id;
15 /* Binary World Wide unique Node Name for remote FC Initiator Nport */
16 u64 nport_wwnn;
17 /* ASCII formatted WWPN for FC Initiator Nport */
18 char nport_name[TCM_QLA2XXX_NAMELEN];
19 /* Pointer to qla_tgt_sess */
20 struct qla_tgt_sess *qla_tgt_sess;
21 /* Pointer to TCM FC nexus */
22 struct se_session *nport_nexus;
23 /* Returned by tcm_qla2xxx_make_nodeacl() */
24 struct se_node_acl se_node_acl;
25};
26
27struct tcm_qla2xxx_tpg_attrib {
28 int generate_node_acls;
29 int cache_dynamic_acls;
30 int demo_mode_write_protect;
31 int prod_mode_write_protect;
32};
33
34struct tcm_qla2xxx_tpg {
35 /* FC lport target portal group tag for TCM */
36 u16 lport_tpgt;
37 /* Atomic bit to determine TPG active status */
38 atomic_t lport_tpg_enabled;
39 /* Pointer back to tcm_qla2xxx_lport */
40 struct tcm_qla2xxx_lport *lport;
41 /* Used by tcm_qla2xxx_tpg_attrib_cit */
42 struct tcm_qla2xxx_tpg_attrib tpg_attrib;
43 /* Returned by tcm_qla2xxx_make_tpg() */
44 struct se_portal_group se_tpg;
45};
46
47#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
48
49struct tcm_qla2xxx_fc_loopid {
50 struct se_node_acl *se_nacl;
51};
52
53struct tcm_qla2xxx_lport {
54 /* SCSI protocol the lport is providing */
55 u8 lport_proto_id;
56 /* Binary World Wide unique Port Name for FC Target Lport */
57 u64 lport_wwpn;
58 /* Binary World Wide unique Port Name for FC NPIV Target Lport */
59 u64 lport_npiv_wwpn;
60 /* Binary World Wide unique Node Name for FC NPIV Target Lport */
61 u64 lport_npiv_wwnn;
62 /* ASCII formatted WWPN for FC Target Lport */
63 char lport_name[TCM_QLA2XXX_NAMELEN];
64 /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
65 char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
66 /* map for fc_port pointers in 24-bit FC Port ID space */
67 struct btree_head32 lport_fcport_map;
68 /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
69 struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
70 /* Pointer to struct scsi_qla_host from qla2xxx LLD */
71 struct scsi_qla_host *qla_vha;
72 /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
73 struct scsi_qla_host *qla_npiv_vp;
74 /* Pointer to struct qla_tgt pointer */
75 struct qla_tgt lport_qla_tgt;
76 /* Pointer to struct fc_vport for NPIV vport from libfc */
77 struct fc_vport *npiv_vport;
78 /* Pointer to TPG=1 for non NPIV mode */
79 struct tcm_qla2xxx_tpg *tpg_1;
80 /* Returned by tcm_qla2xxx_make_lport() */
81 struct se_wwn lport_wwn;
82};
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 0b0a7d42137d..c681b2a355e1 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -9,6 +9,140 @@
9#include "ql4_glbl.h" 9#include "ql4_glbl.h"
10#include "ql4_dbg.h" 10#include "ql4_dbg.h"
11 11
12static ssize_t
13qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
14 struct bin_attribute *ba, char *buf, loff_t off,
15 size_t count)
16{
17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
18 struct device, kobj)));
19
20 if (!is_qla8022(ha))
21 return -EINVAL;
22
23 if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
24 return 0;
25
26 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
27 ha->fw_dump_size);
28}
29
30static ssize_t
31qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
32 struct bin_attribute *ba, char *buf, loff_t off,
33 size_t count)
34{
35 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
36 struct device, kobj)));
37 uint32_t dev_state;
38 long reading;
39 int ret = 0;
40
41 if (!is_qla8022(ha))
42 return -EINVAL;
43
44 if (off != 0)
45 return ret;
46
47 buf[1] = 0;
48 ret = kstrtol(buf, 10, &reading);
49 if (ret) {
50 ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
51 __func__, ret);
52 return ret;
53 }
54
55 switch (reading) {
56 case 0:
57 /* clear dump collection flags */
58 if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
59 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
60 /* Reload minidump template */
61 qla4xxx_alloc_fw_dump(ha);
62 DEBUG2(ql4_printk(KERN_INFO, ha,
63 "Firmware template reloaded\n"));
64 }
65 break;
66 case 1:
67 /* Set flag to read dump */
68 if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
69 !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
70 set_bit(AF_82XX_DUMP_READING, &ha->flags);
71 DEBUG2(ql4_printk(KERN_INFO, ha,
72 "Raw firmware dump ready for read on (%ld).\n",
73 ha->host_no));
74 }
75 break;
76 case 2:
77 /* Reset HBA */
78 qla4_8xxx_idc_lock(ha);
79 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
80 if (dev_state == QLA82XX_DEV_READY) {
81 ql4_printk(KERN_INFO, ha,
82 "%s: Setting Need reset, reset_owner is 0x%x.\n",
83 __func__, ha->func_num);
84 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
85 QLA82XX_DEV_NEED_RESET);
86 set_bit(AF_82XX_RST_OWNER, &ha->flags);
87 } else
88 ql4_printk(KERN_INFO, ha,
89 "%s: Reset not performed as device state is 0x%x\n",
90 __func__, dev_state);
91
92 qla4_8xxx_idc_unlock(ha);
93 break;
94 default:
95 /* do nothing */
96 break;
97 }
98
99 return count;
100}
101
102static struct bin_attribute sysfs_fw_dump_attr = {
103 .attr = {
104 .name = "fw_dump",
105 .mode = S_IRUSR | S_IWUSR,
106 },
107 .size = 0,
108 .read = qla4_8xxx_sysfs_read_fw_dump,
109 .write = qla4_8xxx_sysfs_write_fw_dump,
110};
111
112static struct sysfs_entry {
113 char *name;
114 struct bin_attribute *attr;
115} bin_file_entries[] = {
116 { "fw_dump", &sysfs_fw_dump_attr },
117 { NULL },
118};
119
120void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
121{
122 struct Scsi_Host *host = ha->host;
123 struct sysfs_entry *iter;
124 int ret;
125
126 for (iter = bin_file_entries; iter->name; iter++) {
127 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
128 iter->attr);
129 if (ret)
130 ql4_printk(KERN_ERR, ha,
131 "Unable to create sysfs %s binary attribute (%d).\n",
132 iter->name, ret);
133 }
134}
135
136void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
137{
138 struct Scsi_Host *host = ha->host;
139 struct sysfs_entry *iter;
140
141 for (iter = bin_file_entries; iter->name; iter++)
142 sysfs_remove_bin_file(&host->shost_gendev.kobj,
143 iter->attr);
144}
145
12/* Scsi_Host attributes. */ 146/* Scsi_Host attributes. */
13static ssize_t 147static ssize_t
14qla4xxx_fw_version_show(struct device *dev, 148qla4xxx_fw_version_show(struct device *dev,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7f2492e88be7..96a5616a8fda 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -398,6 +398,16 @@ struct isp_operations {
398 int (*get_sys_info) (struct scsi_qla_host *); 398 int (*get_sys_info) (struct scsi_qla_host *);
399}; 399};
400 400
401struct ql4_mdump_size_table {
402 uint32_t size;
403 uint32_t size_cmask_02;
404 uint32_t size_cmask_04;
405 uint32_t size_cmask_08;
406 uint32_t size_cmask_10;
407 uint32_t size_cmask_FF;
408 uint32_t version;
409};
410
401/*qla4xxx ipaddress configuration details */ 411/*qla4xxx ipaddress configuration details */
402struct ipaddress_config { 412struct ipaddress_config {
403 uint16_t ipv4_options; 413 uint16_t ipv4_options;
@@ -485,6 +495,10 @@ struct scsi_qla_host {
485#define AF_EEH_BUSY 20 /* 0x00100000 */ 495#define AF_EEH_BUSY 20 /* 0x00100000 */
486#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */ 496#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
487#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */ 497#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
498#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
499#define AF_82XX_RST_OWNER 25 /* 0x02000000 */
500#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
501
488 unsigned long dpc_flags; 502 unsigned long dpc_flags;
489 503
490#define DPC_RESET_HA 1 /* 0x00000002 */ 504#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -662,6 +676,11 @@ struct scsi_qla_host {
662 676
663 uint32_t nx_dev_init_timeout; 677 uint32_t nx_dev_init_timeout;
664 uint32_t nx_reset_timeout; 678 uint32_t nx_reset_timeout;
679 void *fw_dump;
680 uint32_t fw_dump_size;
681 uint32_t fw_dump_capture_mask;
682 void *fw_dump_tmplt_hdr;
683 uint32_t fw_dump_tmplt_size;
665 684
666 struct completion mbx_intr_comp; 685 struct completion mbx_intr_comp;
667 686
@@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
936#define PROCESS_ALL_AENS 0 955#define PROCESS_ALL_AENS 0
937#define FLUSH_DDB_CHANGED_AENS 1 956#define FLUSH_DDB_CHANGED_AENS 1
938 957
958/* Defines for udev events */
959#define QL4_UEVENT_CODE_FW_DUMP 0
960
939#endif /*_QLA4XXX_H */ 961#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 210cd1d64475..7240948fb929 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -385,6 +385,11 @@ struct qla_flt_region {
385#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091 385#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
386#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092 386#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
387#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093 387#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
388#define MBOX_CMD_MINIDUMP 0x0129
389
390/* Minidump subcommand */
391#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
392#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
388 393
389/* Mailbox 1 */ 394/* Mailbox 1 */
390#define FW_STATE_READY 0x0000 395#define FW_STATE_READY 0x0000
@@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
1190 uint8_t reserved2[264]; /* 0x0308 - 0x040F */ 1195 uint8_t reserved2[264]; /* 0x0308 - 0x040F */
1191}; 1196};
1192 1197
1198#define QLA82XX_DBG_STATE_ARRAY_LEN 16
1199#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
1200#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
1201
1202struct qla4_8xxx_minidump_template_hdr {
1203 uint32_t entry_type;
1204 uint32_t first_entry_offset;
1205 uint32_t size_of_template;
1206 uint32_t capture_debug_level;
1207 uint32_t num_of_entries;
1208 uint32_t version;
1209 uint32_t driver_timestamp;
1210 uint32_t checksum;
1211
1212 uint32_t driver_capture_mask;
1213 uint32_t driver_info_word2;
1214 uint32_t driver_info_word3;
1215 uint32_t driver_info_word4;
1216
1217 uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
1218 uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
1219};
1220
1193#endif /* _QLA4X_FW_H */ 1221#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 910536667cf5..20b49d019043 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
196int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job); 196int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
197 197
198void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry); 198void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
199int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
200 dma_addr_t phys_addr);
201int qla4xxx_req_template_size(struct scsi_qla_host *ha);
202void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
203void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
204void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
199 205
200extern int ql4xextended_error_logging; 206extern int ql4xextended_error_logging;
201extern int ql4xdontresethba; 207extern int ql4xdontresethba;
202extern int ql4xenablemsix; 208extern int ql4xenablemsix;
209extern int ql4xmdcapmask;
210extern int ql4xenablemd;
203 211
204extern struct device_attribute *qla4xxx_host_attrs[]; 212extern struct device_attribute *qla4xxx_host_attrs[];
205#endif /* _QLA4x_GBL_H */ 213#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90ee5d8fa731..bf36723b84e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
277 return ipv4_wait|ipv6_wait; 277 return ipv4_wait|ipv6_wait;
278} 278}
279 279
280/**
281 * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
282 * @ha: pointer to host adapter structure.
283 **/
284void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
285{
286 int status;
287 uint32_t capture_debug_level;
288 int hdr_entry_bit, k;
289 void *md_tmp;
290 dma_addr_t md_tmp_dma;
291 struct qla4_8xxx_minidump_template_hdr *md_hdr;
292
293 if (ha->fw_dump) {
294 ql4_printk(KERN_WARNING, ha,
295 "Firmware dump previously allocated.\n");
296 return;
297 }
298
299 status = qla4xxx_req_template_size(ha);
300 if (status != QLA_SUCCESS) {
301 ql4_printk(KERN_INFO, ha,
302 "scsi%ld: Failed to get template size\n",
303 ha->host_no);
304 return;
305 }
306
307 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
308
309 /* Allocate memory for saving the template */
310 md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
311 &md_tmp_dma, GFP_KERNEL);
312
313 /* Request template */
314 status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
315 if (status != QLA_SUCCESS) {
316 ql4_printk(KERN_INFO, ha,
317 "scsi%ld: Failed to get minidump template\n",
318 ha->host_no);
319 goto alloc_cleanup;
320 }
321
322 md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
323
324 capture_debug_level = md_hdr->capture_debug_level;
325
326 /* Get capture mask based on module loadtime setting. */
327 if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
328 ha->fw_dump_capture_mask = ql4xmdcapmask;
329 else
330 ha->fw_dump_capture_mask = capture_debug_level;
331
332 md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
333
334 DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
335 md_hdr->num_of_entries));
336 DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
337 ha->fw_dump_tmplt_size));
338 DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
339 ha->fw_dump_capture_mask));
340
341 /* Calculate fw_dump_size */
342 for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
343 hdr_entry_bit <<= 1, k++) {
344 if (hdr_entry_bit & ha->fw_dump_capture_mask)
345 ha->fw_dump_size += md_hdr->capture_size_array[k];
346 }
347
348 /* Total firmware dump size including command header */
349 ha->fw_dump_size += ha->fw_dump_tmplt_size;
350 ha->fw_dump = vmalloc(ha->fw_dump_size);
351 if (!ha->fw_dump)
352 goto alloc_cleanup;
353
354 DEBUG2(ql4_printk(KERN_INFO, ha,
355 "Minidump Tempalate Size = 0x%x KB\n",
356 ha->fw_dump_tmplt_size));
357 DEBUG2(ql4_printk(KERN_INFO, ha,
358 "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
359
360 memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
361 ha->fw_dump_tmplt_hdr = ha->fw_dump;
362
363alloc_cleanup:
364 dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
365 md_tmp, md_tmp_dma);
366}
367
280static int qla4xxx_fw_ready(struct scsi_qla_host *ha) 368static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
281{ 369{
282 uint32_t timeout_count; 370 uint32_t timeout_count;
@@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
445 "control block\n", ha->host_no, __func__)); 533 "control block\n", ha->host_no, __func__));
446 return status; 534 return status;
447 } 535 }
536
448 if (!qla4xxx_fw_ready(ha)) 537 if (!qla4xxx_fw_ready(ha))
449 return status; 538 return status;
450 539
540 if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
541 qla4xxx_alloc_fw_dump(ha);
542
451 return qla4xxx_get_firmware_status(ha); 543 return qla4xxx_get_firmware_status(ha);
452} 544}
453 545
@@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
884 switch (state) { 976 switch (state) {
885 case DDB_DS_SESSION_ACTIVE: 977 case DDB_DS_SESSION_ACTIVE:
886 case DDB_DS_DISCOVERY: 978 case DDB_DS_DISCOVERY:
887 ddb_entry->unblock_sess(ddb_entry->sess);
888 qla4xxx_update_session_conn_param(ha, ddb_entry); 979 qla4xxx_update_session_conn_param(ha, ddb_entry);
980 ddb_entry->unblock_sess(ddb_entry->sess);
889 status = QLA_SUCCESS; 981 status = QLA_SUCCESS;
890 break; 982 break;
891 case DDB_DS_SESSION_FAILED: 983 case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
897 } 989 }
898 break; 990 break;
899 case DDB_DS_SESSION_ACTIVE: 991 case DDB_DS_SESSION_ACTIVE:
992 case DDB_DS_DISCOVERY:
900 switch (state) { 993 switch (state) {
901 case DDB_DS_SESSION_FAILED: 994 case DDB_DS_SESSION_FAILED:
902 /* 995 /*
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 7ac21dabbf22..cab8f665a41f 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
51 } 51 }
52 } 52 }
53 53
54 if (is_qla8022(ha)) {
55 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
56 DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
57 "prematurely completing mbx cmd as firmware "
58 "recovery detected\n", ha->host_no, __func__));
59 return status;
60 }
61 /* Do not send any mbx cmd if h/w is in failed state*/
62 qla4_8xxx_idc_lock(ha);
63 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
64 qla4_8xxx_idc_unlock(ha);
65 if (dev_state == QLA82XX_DEV_FAILED) {
66 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
67 "failed state, do not send any mailbox commands\n",
68 ha->host_no, __func__);
69 return status;
70 }
71 }
72
73 if ((is_aer_supported(ha)) && 54 if ((is_aer_supported(ha)) &&
74 (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { 55 (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
75 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " 56 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
96 msleep(10); 77 msleep(10);
97 } 78 }
98 79
80 if (is_qla8022(ha)) {
81 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
82 DEBUG2(ql4_printk(KERN_WARNING, ha,
83 "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
84 ha->host_no, __func__));
85 goto mbox_exit;
86 }
87 /* Do not send any mbx cmd if h/w is in failed state*/
88 qla4_8xxx_idc_lock(ha);
89 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
90 qla4_8xxx_idc_unlock(ha);
91 if (dev_state == QLA82XX_DEV_FAILED) {
92 ql4_printk(KERN_WARNING, ha,
93 "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
94 ha->host_no, __func__);
95 goto mbox_exit;
96 }
97 }
98
99 spin_lock_irqsave(&ha->hardware_lock, flags); 99 spin_lock_irqsave(&ha->hardware_lock, flags);
100 100
101 ha->mbox_status_count = outCount; 101 ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@ mbox_exit:
270 return status; 270 return status;
271} 271}
272 272
273/**
274 * qla4xxx_get_minidump_template - Get the firmware template
275 * @ha: Pointer to host adapter structure.
276 * @phys_addr: dma address for template
277 *
278 * Obtain the minidump template from firmware during initialization
279 * as it may not be available when minidump is desired.
280 **/
281int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
282 dma_addr_t phys_addr)
283{
284 uint32_t mbox_cmd[MBOX_REG_COUNT];
285 uint32_t mbox_sts[MBOX_REG_COUNT];
286 int status;
287
288 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
289 memset(&mbox_sts, 0, sizeof(mbox_sts));
290
291 mbox_cmd[0] = MBOX_CMD_MINIDUMP;
292 mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
293 mbox_cmd[2] = LSDW(phys_addr);
294 mbox_cmd[3] = MSDW(phys_addr);
295 mbox_cmd[4] = ha->fw_dump_tmplt_size;
296 mbox_cmd[5] = 0;
297
298 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
299 &mbox_sts[0]);
300 if (status != QLA_SUCCESS) {
301 DEBUG2(ql4_printk(KERN_INFO, ha,
302 "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
303 ha->host_no, __func__, mbox_cmd[0],
304 mbox_sts[0], mbox_sts[1]));
305 }
306 return status;
307}
308
309/**
310 * qla4xxx_req_template_size - Get minidump template size from firmware.
311 * @ha: Pointer to host adapter structure.
312 **/
313int qla4xxx_req_template_size(struct scsi_qla_host *ha)
314{
315 uint32_t mbox_cmd[MBOX_REG_COUNT];
316 uint32_t mbox_sts[MBOX_REG_COUNT];
317 int status;
318
319 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
320 memset(&mbox_sts, 0, sizeof(mbox_sts));
321
322 mbox_cmd[0] = MBOX_CMD_MINIDUMP;
323 mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
324
325 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
326 &mbox_sts[0]);
327 if (status == QLA_SUCCESS) {
328 ha->fw_dump_tmplt_size = mbox_sts[1];
329 DEBUG2(ql4_printk(KERN_INFO, ha,
330 "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
331 __func__, mbox_sts[0], mbox_sts[1],
332 mbox_sts[2], mbox_sts[3], mbox_sts[4],
333 mbox_sts[5], mbox_sts[6], mbox_sts[7]));
334 if (ha->fw_dump_tmplt_size == 0)
335 status = QLA_ERROR;
336 } else {
337 ql4_printk(KERN_WARNING, ha,
338 "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
339 __func__, mbox_sts[0], mbox_sts[1]);
340 status = QLA_ERROR;
341 }
342
343 return status;
344}
345
273void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha) 346void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
274{ 347{
275 set_bit(AF_FW_RECOVERY, &ha->flags); 348 set_bit(AF_FW_RECOVERY, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e1e46b6dac75..228b67020d2c 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -7,6 +7,7 @@
7#include <linux/delay.h> 7#include <linux/delay.h>
8#include <linux/io.h> 8#include <linux/io.h>
9#include <linux/pci.h> 9#include <linux/pci.h>
10#include <linux/ratelimit.h>
10#include "ql4_def.h" 11#include "ql4_def.h"
11#include "ql4_glbl.h" 12#include "ql4_glbl.h"
12 13
@@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
420 return data; 421 return data;
421} 422}
422 423
 424/* Minidump related functions */
/*
 * qla4_8xxx_md_rw_32 - indirect 32-bit register access via the 2M CRB window.
 * @off:  register offset; the upper 16 bits select the window, the lower
 *        16 bits index into the indirect region.
 * @data: value to write when @flag is nonzero (ignored on reads).
 * @flag: nonzero = write @data; zero = read and return the register value.
 *
 * Returns the value read (read path), QLA_SUCCESS (write path), or
 * QLA_ERROR if the window-select write did not read back correctly.
 * NOTE(review): the error/return value shares the same uint32_t as read
 * data, so a register that happens to contain QLA_ERROR is ambiguous —
 * callers treat the result as raw data.
 */
 425static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
 426 u32 data, uint8_t flag)
 427{
 428 uint32_t win_read, off_value, rval = QLA_SUCCESS;
 429
 430 off_value = off & 0xFFFF0000;
 431 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
 432
 433 /* Read back value to make sure write has gone through before trying
 434 * to use it.
 435 */
 436 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
 437 if (win_read != off_value) {
 438 DEBUG2(ql4_printk(KERN_INFO, ha,
 439 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
 440 __func__, off_value, win_read, off));
 441 return QLA_ERROR;
 442 }
 443
 444 off_value = off & 0x0000FFFF;
 445
 446 if (flag)
 447 writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
 448 ha->nx_pcibase));
 449 else
 450 rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
 451 ha->nx_pcibase));
 452
 453 return rval;
 454}
455
423#define CRB_WIN_LOCK_TIMEOUT 100000000 456#define CRB_WIN_LOCK_TIMEOUT 100000000
424 457
425int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha) 458int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1252 } 1285 }
1253 1286
1254 if (j >= MAX_CTL_CHECK) { 1287 if (j >= MAX_CTL_CHECK) {
1255 if (printk_ratelimit()) 1288 printk_ratelimited(KERN_ERR
1256 ql4_printk(KERN_ERR, ha, 1289 "%s: failed to read through agent\n",
1257 "failed to read through agent\n"); 1290 __func__);
1258 break; 1291 break;
1259 } 1292 }
1260 1293
@@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1390 if (j >= MAX_CTL_CHECK) { 1423 if (j >= MAX_CTL_CHECK) {
1391 if (printk_ratelimit()) 1424 if (printk_ratelimit())
1392 ql4_printk(KERN_ERR, ha, 1425 ql4_printk(KERN_ERR, ha,
1393 "failed to write through agent\n"); 1426 "%s: failed to read through agent\n",
1427 __func__);
1394 ret = -1; 1428 ret = -1;
1395 break; 1429 break;
1396 } 1430 }
@@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1462 1496
1463 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1497 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1464 drv_active |= (1 << (ha->func_num * 4)); 1498 drv_active |= (1 << (ha->func_num * 4));
1499 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1500 __func__, ha->host_no, drv_active);
1465 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1501 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
1466} 1502}
1467 1503
@@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
1472 1508
1473 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1509 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1474 drv_active &= ~(1 << (ha->func_num * 4)); 1510 drv_active &= ~(1 << (ha->func_num * 4));
1511 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1512 __func__, ha->host_no, drv_active);
1475 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1513 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
1476} 1514}
1477 1515
@@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1497 1535
1498 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1536 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1499 drv_state |= (1 << (ha->func_num * 4)); 1537 drv_state |= (1 << (ha->func_num * 4));
1538 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1539 __func__, ha->host_no, drv_state);
1500 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1540 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
1501} 1541}
1502 1542
@@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1507 1547
1508 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1548 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1509 drv_state &= ~(1 << (ha->func_num * 4)); 1549 drv_state &= ~(1 << (ha->func_num * 4));
1550 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1551 __func__, ha->host_no, drv_state);
1510 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1552 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
1511} 1553}
1512 1554
@@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
1601 qla4_8xxx_rom_unlock(ha); 1643 qla4_8xxx_rom_unlock(ha);
1602} 1644}
1603 1645
/*
 * qla4_8xxx_minidump_process_rdcrb - capture a CRB register range.
 * Reads @op_count registers starting at @addr, stepping by @addr_stride,
 * and emits (address, value) pairs into the dump buffer at *d_ptr,
 * advancing *d_ptr past the data written.
 */
1646static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1647 struct qla82xx_minidump_entry_hdr *entry_hdr,
1648 uint32_t **d_ptr)
1649{
1650 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1651 struct qla82xx_minidump_entry_crb *crb_hdr;
1652 uint32_t *data_ptr = *d_ptr;
1653
1654 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1655 crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
1656 r_addr = crb_hdr->addr;
1657 r_stride = crb_hdr->crb_strd.addr_stride;
1658 loop_cnt = crb_hdr->op_count;
1659
1660 for (i = 0; i < loop_cnt; i++) {
1661 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1662 *data_ptr++ = cpu_to_le32(r_addr);
1663 *data_ptr++ = cpu_to_le32(r_value);
1664 r_addr += r_stride;
1665 }
1666 *d_ptr = data_ptr;
1667}
1668
/*
 * qla4_8xxx_minidump_process_l2tag - dump an L2 cache region by tag.
 * For each of @op_count tags: writes the tag register, optionally kicks
 * the control register, optionally polls it (bounded by poll_wait jiffies)
 * until the poll_mask bits clear, then reads @read_addr_cnt words into the
 * dump buffer.  Returns QLA_SUCCESS, or QLA_ERROR if a control-register
 * poll times out (dump capture aborted; *d_ptr not updated in that case).
 */
1669static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1670 struct qla82xx_minidump_entry_hdr *entry_hdr,
1671 uint32_t **d_ptr)
1672{
1673 uint32_t addr, r_addr, c_addr, t_r_addr;
1674 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1675 unsigned long p_wait, w_time, p_mask;
1676 uint32_t c_value_w, c_value_r;
1677 struct qla82xx_minidump_entry_cache *cache_hdr;
1678 int rval = QLA_ERROR;
1679 uint32_t *data_ptr = *d_ptr;
1680
1681 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1682 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
1683
1684 loop_count = cache_hdr->op_count;
1685 r_addr = cache_hdr->read_addr;
1686 c_addr = cache_hdr->control_addr;
1687 c_value_w = cache_hdr->cache_ctrl.write_value;
1688
1689 t_r_addr = cache_hdr->tag_reg_addr;
1690 t_value = cache_hdr->addr_ctrl.init_tag_value;
1691 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
1692 p_wait = cache_hdr->cache_ctrl.poll_wait;
1693 p_mask = cache_hdr->cache_ctrl.poll_mask;
1694
1695 for (i = 0; i < loop_count; i++) {
 /* Select the tag for this iteration. */
1696 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
1697
1698 if (c_value_w)
1699 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
1700
 /* Poll control register until mask bits clear or timeout. */
1701 if (p_mask) {
1702 w_time = jiffies + p_wait;
1703 do {
1704 c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
1705 0, 0);
1706 if ((c_value_r & p_mask) == 0) {
1707 break;
1708 } else if (time_after_eq(jiffies, w_time)) {
1709 /* capturing dump failed */
1710 return rval;
1711 }
1712 } while (1);
1713 }
1714
1715 addr = r_addr;
1716 for (k = 0; k < r_cnt; k++) {
1717 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1718 *data_ptr++ = cpu_to_le32(r_value);
1719 addr += cache_hdr->read_ctrl.read_addr_stride;
1720 }
1721
1722 t_value += cache_hdr->addr_ctrl.tag_value_stride;
1723 }
1724 *d_ptr = data_ptr;
1725 return QLA_SUCCESS;
1726}
1727
/*
 * qla4_8xxx_minidump_process_control - execute a CNTRL template entry.
 * Interprets the entry as a small opcode program applied to @op_count CRB
 * addresses (stride addr_stride): write, read-modify-write, AND/OR masking,
 * bounded polling, and save/restore of values through the template header's
 * saved_state_array.  Each handled opcode bit is cleared from the local
 * copy so combinations compose (e.g. AND+OR is applied as one write).
 * Returns QLA_SUCCESS, or QLA_ERROR if a POLL opcode times out.
 */
1728static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1729 struct qla82xx_minidump_entry_hdr *entry_hdr)
1730{
1731 struct qla82xx_minidump_entry_crb *crb_entry;
1732 uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
1733 uint32_t crb_addr;
1734 unsigned long wtime;
1735 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
1736 int i;
1737
1738 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1739 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1740 ha->fw_dump_tmplt_hdr;
1741 crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
1742
1743 crb_addr = crb_entry->addr;
1744 for (i = 0; i < crb_entry->op_count; i++) {
1745 opcode = crb_entry->crb_ctrl.opcode;
1746 if (opcode & QLA82XX_DBG_OPCODE_WR) {
1747 qla4_8xxx_md_rw_32(ha, crb_addr,
1748 crb_entry->value_1, 1);
1749 opcode &= ~QLA82XX_DBG_OPCODE_WR;
1750 }
 /* Read then write back the same value (read-to-clear style). */
1751 if (opcode & QLA82XX_DBG_OPCODE_RW) {
1752 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1753 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1754 opcode &= ~QLA82XX_DBG_OPCODE_RW;
1755 }
1756 if (opcode & QLA82XX_DBG_OPCODE_AND) {
1757 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1758 read_value &= crb_entry->value_2;
1759 opcode &= ~QLA82XX_DBG_OPCODE_AND;
 /* AND+OR combined is applied as a single write. */
1760 if (opcode & QLA82XX_DBG_OPCODE_OR) {
1761 read_value |= crb_entry->value_3;
1762 opcode &= ~QLA82XX_DBG_OPCODE_OR;
1763 }
1764 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1765 }
1766 if (opcode & QLA82XX_DBG_OPCODE_OR) {
1767 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1768 read_value |= crb_entry->value_3;
1769 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1770 opcode &= ~QLA82XX_DBG_OPCODE_OR;
1771 }
 /* Poll until (value & value_2) == value_1, bounded by poll_timeout. */
1772 if (opcode & QLA82XX_DBG_OPCODE_POLL) {
1773 poll_time = crb_entry->crb_strd.poll_timeout;
1774 wtime = jiffies + poll_time;
1775 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1776
1777 do {
1778 if ((read_value & crb_entry->value_2) ==
1779 crb_entry->value_1)
1780 break;
1781 else if (time_after_eq(jiffies, wtime)) {
1782 /* capturing dump failed */
1783 rval = QLA_ERROR;
1784 break;
1785 } else
1786 read_value = qla4_8xxx_md_rw_32(ha,
1787 crb_addr, 0, 0);
1788 } while (1);
1789 opcode &= ~QLA82XX_DBG_OPCODE_POLL;
1790 }
1791
 /* Save a register value into the template's saved_state_array. */
1792 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
1793 if (crb_entry->crb_strd.state_index_a) {
1794 index = crb_entry->crb_strd.state_index_a;
1795 addr = tmplt_hdr->saved_state_array[index];
1796 } else {
1797 addr = crb_addr;
1798 }
1799
1800 read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1801 index = crb_entry->crb_ctrl.state_index_v;
1802 tmplt_hdr->saved_state_array[index] = read_value;
1803 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
1804 }
1805
 /* Write a saved (or immediate) value back to a register. */
1806 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
1807 if (crb_entry->crb_strd.state_index_a) {
1808 index = crb_entry->crb_strd.state_index_a;
1809 addr = tmplt_hdr->saved_state_array[index];
1810 } else {
1811 addr = crb_addr;
1812 }
1813
1814 if (crb_entry->crb_ctrl.state_index_v) {
1815 index = crb_entry->crb_ctrl.state_index_v;
1816 read_value =
1817 tmplt_hdr->saved_state_array[index];
1818 } else {
1819 read_value = crb_entry->value_1;
1820 }
1821
1822 qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
1823 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
1824 }
1825
 /* Modify a saved value in place: shift, mask, or-in, add. */
1826 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
1827 index = crb_entry->crb_ctrl.state_index_v;
1828 read_value = tmplt_hdr->saved_state_array[index];
1829 read_value <<= crb_entry->crb_ctrl.shl;
1830 read_value >>= crb_entry->crb_ctrl.shr;
1831 if (crb_entry->value_2)
1832 read_value &= crb_entry->value_2;
1833 read_value |= crb_entry->value_3;
1834 read_value += crb_entry->value_1;
1835 tmplt_hdr->saved_state_array[index] = read_value;
1836 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
1837 }
1838 crb_addr += crb_entry->crb_strd.addr_stride;
1839 }
1840 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
1841 return rval;
1842}
1843
/*
 * qla4_8xxx_minidump_process_rdocm - dump on-chip memory (OCM).
 * Reads @op_count words directly through the mapped BAR (readl, no CRB
 * window indirection) starting at @read_addr with @read_addr_stride,
 * appending values to the dump buffer at *d_ptr.
 */
1844static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
1845 struct qla82xx_minidump_entry_hdr *entry_hdr,
1846 uint32_t **d_ptr)
1847{
1848 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1849 struct qla82xx_minidump_entry_rdocm *ocm_hdr;
1850 uint32_t *data_ptr = *d_ptr;
1851
1852 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1853 ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
1854 r_addr = ocm_hdr->read_addr;
1855 r_stride = ocm_hdr->read_addr_stride;
1856 loop_cnt = ocm_hdr->op_count;
1857
1858 DEBUG2(ql4_printk(KERN_INFO, ha,
1859 "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
1860 __func__, r_addr, r_stride, loop_cnt));
1861
1862 for (i = 0; i < loop_cnt; i++) {
1863 r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
1864 *data_ptr++ = cpu_to_le32(r_value);
1865 r_addr += r_stride;
1866 }
1867 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
1868 __func__, (loop_cnt * sizeof(uint32_t))));
1869 *d_ptr = data_ptr;
1870}
1871
/*
 * qla4_8xxx_minidump_process_rdmux - dump a muxed register.
 * For each of @op_count select values (starting at @select_value, stepped
 * by @select_value_stride): writes the select register, reads @read_addr,
 * and emits (select_value, read_value) pairs into the dump buffer.
 */
1872static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1873 struct qla82xx_minidump_entry_hdr *entry_hdr,
1874 uint32_t **d_ptr)
1875{
1876 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
1877 struct qla82xx_minidump_entry_mux *mux_hdr;
1878 uint32_t *data_ptr = *d_ptr;
1879
1880 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1881 mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
1882 r_addr = mux_hdr->read_addr;
1883 s_addr = mux_hdr->select_addr;
1884 s_stride = mux_hdr->select_value_stride;
1885 s_value = mux_hdr->select_value;
1886 loop_cnt = mux_hdr->op_count;
1887
1888 for (i = 0; i < loop_cnt; i++) {
1889 qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
1890 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1891 *data_ptr++ = cpu_to_le32(s_value);
1892 *data_ptr++ = cpu_to_le32(r_value);
1893 s_value += s_stride;
1894 }
1895 *d_ptr = data_ptr;
1896}
1897
/*
 * qla4_8xxx_minidump_process_l1cache - dump an L1 cache region by tag.
 * Same tag/control/read sequence as the L2 variant but with no poll step
 * and no failure path, so it returns void.
 */
1898static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1899 struct qla82xx_minidump_entry_hdr *entry_hdr,
1900 uint32_t **d_ptr)
1901{
1902 uint32_t addr, r_addr, c_addr, t_r_addr;
1903 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1904 uint32_t c_value_w;
1905 struct qla82xx_minidump_entry_cache *cache_hdr;
1906 uint32_t *data_ptr = *d_ptr;
1907
1908 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
1909 loop_count = cache_hdr->op_count;
1910 r_addr = cache_hdr->read_addr;
1911 c_addr = cache_hdr->control_addr;
1912 c_value_w = cache_hdr->cache_ctrl.write_value;
1913
1914 t_r_addr = cache_hdr->tag_reg_addr;
1915 t_value = cache_hdr->addr_ctrl.init_tag_value;
1916 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
1917
1918 for (i = 0; i < loop_count; i++) {
 /* Select the tag, kick the control register, then read the line. */
1919 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
1920 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
1921 addr = r_addr;
1922 for (k = 0; k < r_cnt; k++) {
1923 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1924 *data_ptr++ = cpu_to_le32(r_value);
1925 addr += cache_hdr->read_ctrl.read_addr_stride;
1926 }
1927 t_value += cache_hdr->addr_ctrl.tag_value_stride;
1928 }
1929 *d_ptr = data_ptr;
1930}
1931
/*
 * qla4_8xxx_minidump_process_queue - dump per-queue registers.
 * For each of @op_count queue ids (stepped by queue_id_stride): writes the
 * queue select register, then reads @read_addr_cnt words starting at
 * @read_addr with @read_addr_stride into the dump buffer.
 */
1932static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
1933 struct qla82xx_minidump_entry_hdr *entry_hdr,
1934 uint32_t **d_ptr)
1935{
1936 uint32_t s_addr, r_addr;
1937 uint32_t r_stride, r_value, r_cnt, qid = 0;
1938 uint32_t i, k, loop_cnt;
1939 struct qla82xx_minidump_entry_queue *q_hdr;
1940 uint32_t *data_ptr = *d_ptr;
1941
1942 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1943 q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
1944 s_addr = q_hdr->select_addr;
1945 r_cnt = q_hdr->rd_strd.read_addr_cnt;
1946 r_stride = q_hdr->rd_strd.read_addr_stride;
1947 loop_cnt = q_hdr->op_count;
1948
1949 for (i = 0; i < loop_cnt; i++) {
1950 qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
1951 r_addr = q_hdr->read_addr;
1952 for (k = 0; k < r_cnt; k++) {
1953 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1954 *data_ptr++ = cpu_to_le32(r_value);
1955 r_addr += r_stride;
1956 }
1957 qid += q_hdr->q_strd.queue_id_stride;
1958 }
1959 *d_ptr = data_ptr;
1960}
1961
1962#define MD_DIRECT_ROM_WINDOW 0x42110030
1963#define MD_DIRECT_ROM_READ_BASE 0x42150000
1964
/*
 * qla4_8xxx_minidump_process_rdrom - dump flash/ROM contents.
 * Reads @read_data_size bytes word-by-word: the upper 16 address bits are
 * programmed into the direct ROM window, the lower 16 index the read base.
 * NOTE(review): the debug format string below labels the second value
 * "read_data_size" but loop_cnt (words) is printed — cosmetic mismatch.
 */
1965static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
1966 struct qla82xx_minidump_entry_hdr *entry_hdr,
1967 uint32_t **d_ptr)
1968{
1969 uint32_t r_addr, r_value;
1970 uint32_t i, loop_cnt;
1971 struct qla82xx_minidump_entry_rdrom *rom_hdr;
1972 uint32_t *data_ptr = *d_ptr;
1973
1974 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1975 rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
1976 r_addr = rom_hdr->read_addr;
1977 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
1978
1979 DEBUG2(ql4_printk(KERN_INFO, ha,
1980 "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
1981 __func__, r_addr, loop_cnt));
1982
1983 for (i = 0; i < loop_cnt; i++) {
1984 qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
1985 (r_addr & 0xFFFF0000), 1);
1986 r_value = qla4_8xxx_md_rw_32(ha,
1987 MD_DIRECT_ROM_READ_BASE +
1988 (r_addr & 0x0000FFFF), 0, 0);
1989 *data_ptr++ = cpu_to_le32(r_value);
1990 r_addr += sizeof(uint32_t);
1991 }
1992 *d_ptr = data_ptr;
1993}
1994
1995#define MD_MIU_TEST_AGT_CTRL 0x41000090
1996#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1997#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1998
/*
 * qla4_8xxx_minidump_process_rdmem - dump memory via the MIU test agent.
 * Reads @read_data_size bytes in 16-byte chunks: programs the agent
 * address, starts the agent, busy-waits (up to MAX_CTL_CHECK polls) for
 * BUSY to clear, then copies four data words per chunk into the dump
 * buffer.  The address must be 16-byte aligned and the size a multiple of
 * 16, else QLA_ERROR.  Holds ha->hw_lock (irqsave) across the transfer.
 * NOTE(review): on agent timeout this returns QLA_SUCCESS after a
 * ratelimited error — the caller will treat a truncated dump as complete;
 * looks like it should be QLA_ERROR — confirm intent.
 * NOTE(review): "alligned" in the debug string below is a typo ("aligned").
 */
1999static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2000 struct qla82xx_minidump_entry_hdr *entry_hdr,
2001 uint32_t **d_ptr)
2002{
2003 uint32_t r_addr, r_value, r_data;
2004 uint32_t i, j, loop_cnt;
2005 struct qla82xx_minidump_entry_rdmem *m_hdr;
2006 unsigned long flags;
2007 uint32_t *data_ptr = *d_ptr;
2008
2009 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2010 m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
2011 r_addr = m_hdr->read_addr;
2012 loop_cnt = m_hdr->read_data_size/16;
2013
2014 DEBUG2(ql4_printk(KERN_INFO, ha,
2015 "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
2016 __func__, r_addr, m_hdr->read_data_size));
2017
2018 if (r_addr & 0xf) {
2019 DEBUG2(ql4_printk(KERN_INFO, ha,
2020 "[%s]: Read addr 0x%x not 16 bytes alligned\n",
2021 __func__, r_addr));
2022 return QLA_ERROR;
2023 }
2024
2025 if (m_hdr->read_data_size % 16) {
2026 DEBUG2(ql4_printk(KERN_INFO, ha,
2027 "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
2028 __func__, m_hdr->read_data_size));
2029 return QLA_ERROR;
2030 }
2031
2032 DEBUG2(ql4_printk(KERN_INFO, ha,
2033 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
2034 __func__, r_addr, m_hdr->read_data_size, loop_cnt));
2035
2036 write_lock_irqsave(&ha->hw_lock, flags);
2037 for (i = 0; i < loop_cnt; i++) {
 /* Program the 64-bit agent address (high half always 0 here). */
2038 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
2039 r_value = 0;
2040 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
2041 r_value = MIU_TA_CTL_ENABLE;
2042 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
2043 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
2044 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
2045
 /* Busy-wait for the agent to finish this 16-byte read. */
2046 for (j = 0; j < MAX_CTL_CHECK; j++) {
2047 r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
2048 0, 0);
2049 if ((r_value & MIU_TA_CTL_BUSY) == 0)
2050 break;
2051 }
2052
2053 if (j >= MAX_CTL_CHECK) {
2054 printk_ratelimited(KERN_ERR
2055 "%s: failed to read through agent\n",
2056 __func__);
2057 write_unlock_irqrestore(&ha->hw_lock, flags);
2058 return QLA_SUCCESS;
2059 }
2060
2061 for (j = 0; j < 4; j++) {
2062 r_data = qla4_8xxx_md_rw_32(ha,
2063 MD_MIU_TEST_AGT_RDDATA[j],
2064 0, 0);
2065 *data_ptr++ = cpu_to_le32(r_data);
2066 }
2067
2068 r_addr += 16;
2069 }
2070 write_unlock_irqrestore(&ha->hw_lock, flags);
2071
2072 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
2073 __func__, (loop_cnt * 16)));
2074
2075 *d_ptr = data_ptr;
2076 return QLA_SUCCESS;
2077}
2078
/*
 * ql4_8xxx_mark_entry_skipped - flag a template entry as not captured.
 * Sets QLA82XX_DBG_SKIPPED_FLAG in the entry's driver_flags and logs the
 * entry index, type, and capture mask.
 */
2079static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
2080 struct qla82xx_minidump_entry_hdr *entry_hdr,
2081 int index)
2082{
2083 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
2084 DEBUG2(ql4_printk(KERN_INFO, ha,
2085 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2086 ha->host_no, index, entry_hdr->entry_type,
2087 entry_hdr->d_ctrl.entry_capture_mask));
2088}
2089
2090/**
2091 * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
2092 * @ha: pointer to adapter structure
 *
 * Walks the minidump template entries (following ha->fw_dump_tmplt_hdr)
 * and dispatches each entry type to its capture handler, writing data into
 * ha->fw_dump just past the template.  Entries whose capture mask is not
 * enabled are flagged skipped.  Returns QLA_SUCCESS when the collected
 * size matches ha->fw_dump_size, QLA_ERROR otherwise (no buffer, a
 * handler failed, or a size mismatch).
2093 **/
2094static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2095{
2096 int num_entry_hdr = 0;
2097 struct qla82xx_minidump_entry_hdr *entry_hdr;
2098 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
2099 uint32_t *data_ptr;
2100 uint32_t data_collected = 0;
2101 int i, rval = QLA_ERROR;
2102 uint64_t now;
2103 uint32_t timestamp;
2104
2105 if (!ha->fw_dump) {
2106 ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
2107 __func__, ha->host_no);
2108 return rval;
2109 }
2110
 /* Dump data lands immediately after the template copy in fw_dump. */
2111 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
2112 ha->fw_dump_tmplt_hdr;
2113 data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
2114 ha->fw_dump_tmplt_size);
2115 data_collected += ha->fw_dump_tmplt_size;
2116
2117 num_entry_hdr = tmplt_hdr->num_of_entries;
2118 ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
2119 __func__, data_ptr);
2120 ql4_printk(KERN_INFO, ha,
2121 "[%s]: no of entry headers in Template: 0x%x\n",
2122 __func__, num_entry_hdr);
2123 ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
2124 __func__, ha->fw_dump_capture_mask);
2125 ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
2126 __func__, ha->fw_dump_size, ha->fw_dump_size);
2127
2128 /* Update current timestamp before taking dump */
2129 now = get_jiffies_64();
2130 timestamp = (u32)(jiffies_to_msecs(now) / 1000);
2131 tmplt_hdr->driver_timestamp = timestamp;
2132
2133 entry_hdr = (struct qla82xx_minidump_entry_hdr *)
2134 (((uint8_t *)ha->fw_dump_tmplt_hdr) +
2135 tmplt_hdr->first_entry_offset);
2136
2137 /* Walk through the entry headers - validate/perform required action */
2138 for (i = 0; i < num_entry_hdr; i++) {
2139 if (data_collected >= ha->fw_dump_size) {
2140 ql4_printk(KERN_INFO, ha,
2141 "Data collected: [0x%x], Total Dump size: [0x%x]\n",
2142 data_collected, ha->fw_dump_size);
2143 return rval;
2144 }
2145
 /* Skip entries not selected by the configured capture mask. */
2146 if (!(entry_hdr->d_ctrl.entry_capture_mask &
2147 ha->fw_dump_capture_mask)) {
2148 entry_hdr->d_ctrl.driver_flags |=
2149 QLA82XX_DBG_SKIPPED_FLAG;
2150 goto skip_nxt_entry;
2151 }
2152
2153 DEBUG2(ql4_printk(KERN_INFO, ha,
2154 "Data collected: [0x%x], Dump size left:[0x%x]\n",
2155 data_collected,
2156 (ha->fw_dump_size - data_collected)));
2157
2158 /* Decode the entry type and take required action to capture
2159 * debug data
2160 */
2161 switch (entry_hdr->entry_type) {
2162 case QLA82XX_RDEND:
2163 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2164 break;
2165 case QLA82XX_CNTRL:
2166 rval = qla4_8xxx_minidump_process_control(ha,
2167 entry_hdr);
2168 if (rval != QLA_SUCCESS) {
2169 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2170 goto md_failed;
2171 }
2172 break;
2173 case QLA82XX_RDCRB:
2174 qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
2175 &data_ptr);
2176 break;
2177 case QLA82XX_RDMEM:
2178 rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
2179 &data_ptr);
2180 if (rval != QLA_SUCCESS) {
2181 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2182 goto md_failed;
2183 }
2184 break;
2185 case QLA82XX_BOARD:
2186 case QLA82XX_RDROM:
2187 qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
2188 &data_ptr);
2189 break;
2190 case QLA82XX_L2DTG:
2191 case QLA82XX_L2ITG:
2192 case QLA82XX_L2DAT:
2193 case QLA82XX_L2INS:
2194 rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
2195 &data_ptr);
2196 if (rval != QLA_SUCCESS) {
2197 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2198 goto md_failed;
2199 }
2200 break;
2201 case QLA82XX_L1DAT:
2202 case QLA82XX_L1INS:
2203 qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
2204 &data_ptr);
2205 break;
2206 case QLA82XX_RDOCM:
2207 qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
2208 &data_ptr);
2209 break;
2210 case QLA82XX_RDMUX:
2211 qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
2212 &data_ptr);
2213 break;
2214 case QLA82XX_QUEUE:
2215 qla4_8xxx_minidump_process_queue(ha, entry_hdr,
2216 &data_ptr);
2217 break;
2218 case QLA82XX_RDNOP:
2219 default:
2220 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2221 break;
2222 }
2223
 /* Recompute collected byte count from the data pointer. */
2224 data_collected = (uint8_t *)data_ptr -
2225 ((uint8_t *)((uint8_t *)ha->fw_dump +
2226 ha->fw_dump_tmplt_size));
2227skip_nxt_entry:
2228 /* next entry in the template */
2229 entry_hdr = (struct qla82xx_minidump_entry_hdr *)
2230 (((uint8_t *)entry_hdr) +
2231 entry_hdr->entry_size);
2232 }
2233
2234 if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
2235 ql4_printk(KERN_INFO, ha,
2236 "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
2237 data_collected, ha->fw_dump_size);
2238 goto md_failed;
2239 }
2240
2241 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
2242 __func__, i));
2243md_failed:
2244 return rval;
2245}
2246
2247/**
2248 * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
2249 * @ha: pointer to adapter structure
 * @code: event code; only QL4_UEVENT_CODE_FW_DUMP builds an event string.
 *
 * NOTE(review): for an unrecognized @code, event_string is left
 * uninitialized yet still passed to kobject_uevent_env() — presumably
 * only FW_DUMP is ever passed today; confirm before adding codes.
2250 **/
2251static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
2252{
2253 char event_string[40];
2254 char *envp[] = { event_string, NULL };
2255
2256 switch (code) {
2257 case QL4_UEVENT_CODE_FW_DUMP:
2258 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
2259 ha->host_no);
2260 break;
2261 default:
2262 /*do nothing*/
2263 break;
2264 }
2265
2266 kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
2267}
2268
1604/** 2269/**
1605 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw 2270 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
1606 * @ha: pointer to adapter structure 2271 * @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
1659 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); 2324 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
1660 2325
1661 qla4_8xxx_idc_unlock(ha); 2326 qla4_8xxx_idc_unlock(ha);
2327 if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
2328 !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
2329 if (!qla4_8xxx_collect_md_data(ha)) {
2330 qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
2331 } else {
2332 ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
2333 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
2334 }
2335 }
1662 rval = qla4_8xxx_try_start_fw(ha); 2336 rval = qla4_8xxx_try_start_fw(ha);
1663 qla4_8xxx_idc_lock(ha); 2337 qla4_8xxx_idc_lock(ha);
1664 2338
@@ -1686,6 +2360,7 @@ static void
1686qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha) 2360qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1687{ 2361{
1688 uint32_t dev_state, drv_state, drv_active; 2362 uint32_t dev_state, drv_state, drv_active;
2363 uint32_t active_mask = 0xFFFFFFFF;
1689 unsigned long reset_timeout; 2364 unsigned long reset_timeout;
1690 2365
1691 ql4_printk(KERN_INFO, ha, 2366 ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1697 qla4_8xxx_idc_lock(ha); 2372 qla4_8xxx_idc_lock(ha);
1698 } 2373 }
1699 2374
1700 qla4_8xxx_set_rst_ready(ha); 2375 if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
2376 DEBUG2(ql4_printk(KERN_INFO, ha,
2377 "%s(%ld): reset acknowledged\n",
2378 __func__, ha->host_no));
2379 qla4_8xxx_set_rst_ready(ha);
2380 } else {
2381 active_mask = (~(1 << (ha->func_num * 4)));
2382 }
1701 2383
1702 /* wait for 10 seconds for reset ack from all functions */ 2384 /* wait for 10 seconds for reset ack from all functions */
1703 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 2385 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1709 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", 2391 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
1710 __func__, ha->host_no, drv_state, drv_active); 2392 __func__, ha->host_no, drv_state, drv_active);
1711 2393
1712 while (drv_state != drv_active) { 2394 while (drv_state != (drv_active & active_mask)) {
1713 if (time_after_eq(jiffies, reset_timeout)) { 2395 if (time_after_eq(jiffies, reset_timeout)) {
1714 printk("%s: RESET TIMEOUT!\n", DRIVER_NAME); 2396 ql4_printk(KERN_INFO, ha,
2397 "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
2398 DRIVER_NAME, drv_state, drv_active);
1715 break; 2399 break;
1716 } 2400 }
1717 2401
2402 /*
2403 * When reset_owner times out, check which functions
2404 * acked/did not ack
2405 */
2406 if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
2407 ql4_printk(KERN_INFO, ha,
2408 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
2409 __func__, ha->host_no, drv_state,
2410 drv_active);
2411 }
1718 qla4_8xxx_idc_unlock(ha); 2412 qla4_8xxx_idc_unlock(ha);
1719 msleep(1000); 2413 msleep(1000);
1720 qla4_8xxx_idc_lock(ha); 2414 qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1723 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2417 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1724 } 2418 }
1725 2419
2420 /* Clear RESET OWNER as we are not going to use it any further */
2421 clear_bit(AF_82XX_RST_OWNER, &ha->flags);
2422
1726 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2423 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1727 ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, 2424 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
1728 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2425 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
1729 2426
1730 /* Force to DEV_COLD unless someone else is starting a reset */ 2427 /* Force to DEV_COLD unless someone else is starting a reset */
1731 if (dev_state != QLA82XX_DEV_INITIALIZING) { 2428 if (dev_state != QLA82XX_DEV_INITIALIZING) {
1732 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 2429 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
1733 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 2430 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
2431 qla4_8xxx_set_rst_ready(ha);
1734 } 2432 }
1735} 2433}
1736 2434
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
1765 } 2463 }
1766 2464
1767 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2465 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1768 ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 2466 DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
1769 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2467 dev_state, dev_state < MAX_STATES ?
2468 qdev_state[dev_state] : "Unknown"));
1770 2469
1771 /* wait for 30 seconds for device to go ready */ 2470 /* wait for 30 seconds for device to go ready */
1772 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 2471 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
1775 while (1) { 2474 while (1) {
1776 2475
1777 if (time_after_eq(jiffies, dev_init_timeout)) { 2476 if (time_after_eq(jiffies, dev_init_timeout)) {
1778 ql4_printk(KERN_WARNING, ha, "Device init failed!\n"); 2477 ql4_printk(KERN_WARNING, ha,
2478 "%s: Device Init Failed 0x%x = %s\n",
2479 DRIVER_NAME,
2480 dev_state, dev_state < MAX_STATES ?
2481 qdev_state[dev_state] : "Unknown");
1779 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2482 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1780 QLA82XX_DEV_FAILED); 2483 QLA82XX_DEV_FAILED);
1781 } 2484 }
1782 2485
1783 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2486 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1784 ql4_printk(KERN_INFO, ha, 2487 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
1785 "2:Device state is 0x%x = %s\n", dev_state, 2488 dev_state, dev_state < MAX_STATES ?
1786 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2489 qdev_state[dev_state] : "Unknown");
1787 2490
1788 /* NOTE: Make sure idc unlocked upon exit of switch statement */ 2491 /* NOTE: Make sure idc unlocked upon exit of switch statement */
1789 switch (dev_state) { 2492 switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
2184 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 2887 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
2185 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2888 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2186 QLA82XX_DEV_NEED_RESET); 2889 QLA82XX_DEV_NEED_RESET);
2890 set_bit(AF_82XX_RST_OWNER, &ha->flags);
2187 } else 2891 } else
2188 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n"); 2892 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
2189 2893
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
2195 qla4_8xxx_clear_rst_ready(ha); 2899 qla4_8xxx_clear_rst_ready(ha);
2196 qla4_8xxx_idc_unlock(ha); 2900 qla4_8xxx_idc_unlock(ha);
2197 2901
2198 if (rval == QLA_SUCCESS) 2902 if (rval == QLA_SUCCESS) {
2903 ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
2199 clear_bit(AF_FW_RECOVERY, &ha->flags); 2904 clear_bit(AF_FW_RECOVERY, &ha->flags);
2905 }
2200 2906
2201 return rval; 2907 return rval;
2202} 2908}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc7500e47b8b..30258479f100 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -792,4 +792,196 @@ struct crb_addr_pair {
792#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) 792#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
793#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) 793#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
794 794
795/* Minidump related */
796
797/* Entry Type Defines */
798#define QLA82XX_RDNOP 0
799#define QLA82XX_RDCRB 1
800#define QLA82XX_RDMUX 2
801#define QLA82XX_QUEUE 3
802#define QLA82XX_BOARD 4
803#define QLA82XX_RDOCM 6
804#define QLA82XX_PREGS 7
805#define QLA82XX_L1DTG 8
806#define QLA82XX_L1ITG 9
807#define QLA82XX_L1DAT 11
808#define QLA82XX_L1INS 12
809#define QLA82XX_L2DTG 21
810#define QLA82XX_L2ITG 22
811#define QLA82XX_L2DAT 23
812#define QLA82XX_L2INS 24
813#define QLA82XX_RDROM 71
814#define QLA82XX_RDMEM 72
815#define QLA82XX_CNTRL 98
816#define QLA82XX_RDEND 255
817
818/* Opcodes for Control Entries.
819 * These Flags are bit fields.
820 */
821#define QLA82XX_DBG_OPCODE_WR 0x01
822#define QLA82XX_DBG_OPCODE_RW 0x02
823#define QLA82XX_DBG_OPCODE_AND 0x04
824#define QLA82XX_DBG_OPCODE_OR 0x08
825#define QLA82XX_DBG_OPCODE_POLL 0x10
826#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
827#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
828#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
829
830/* Driver Flags */
831#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
832#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
833 * mismatch */
834
835/* Driver_code is for driver to write some info about the entry
836 * currently not used.
837 */
838struct qla82xx_minidump_entry_hdr {
839 uint32_t entry_type;
840 uint32_t entry_size;
841 uint32_t entry_capture_size;
842 struct {
843 uint8_t entry_capture_mask;
844 uint8_t entry_code;
845 uint8_t driver_code;
846 uint8_t driver_flags;
847 } d_ctrl;
848};
849
850/* Read CRB entry header */
851struct qla82xx_minidump_entry_crb {
852 struct qla82xx_minidump_entry_hdr h;
853 uint32_t addr;
854 struct {
855 uint8_t addr_stride;
856 uint8_t state_index_a;
857 uint16_t poll_timeout;
858 } crb_strd;
859 uint32_t data_size;
860 uint32_t op_count;
861
862 struct {
863 uint8_t opcode;
864 uint8_t state_index_v;
865 uint8_t shl;
866 uint8_t shr;
867 } crb_ctrl;
868
869 uint32_t value_1;
870 uint32_t value_2;
871 uint32_t value_3;
872};
873
874struct qla82xx_minidump_entry_cache {
875 struct qla82xx_minidump_entry_hdr h;
876 uint32_t tag_reg_addr;
877 struct {
878 uint16_t tag_value_stride;
879 uint16_t init_tag_value;
880 } addr_ctrl;
881 uint32_t data_size;
882 uint32_t op_count;
883 uint32_t control_addr;
884 struct {
885 uint16_t write_value;
886 uint8_t poll_mask;
887 uint8_t poll_wait;
888 } cache_ctrl;
889 uint32_t read_addr;
890 struct {
891 uint8_t read_addr_stride;
892 uint8_t read_addr_cnt;
893 uint16_t rsvd_1;
894 } read_ctrl;
895};
896
897/* Read OCM */
898struct qla82xx_minidump_entry_rdocm {
899 struct qla82xx_minidump_entry_hdr h;
900 uint32_t rsvd_0;
901 uint32_t rsvd_1;
902 uint32_t data_size;
903 uint32_t op_count;
904 uint32_t rsvd_2;
905 uint32_t rsvd_3;
906 uint32_t read_addr;
907 uint32_t read_addr_stride;
908};
909
910/* Read Memory */
911struct qla82xx_minidump_entry_rdmem {
912 struct qla82xx_minidump_entry_hdr h;
913 uint32_t rsvd[6];
914 uint32_t read_addr;
915 uint32_t read_data_size;
916};
917
918/* Read ROM */
919struct qla82xx_minidump_entry_rdrom {
920 struct qla82xx_minidump_entry_hdr h;
921 uint32_t rsvd[6];
922 uint32_t read_addr;
923 uint32_t read_data_size;
924};
925
926/* Mux entry */
927struct qla82xx_minidump_entry_mux {
928 struct qla82xx_minidump_entry_hdr h;
929 uint32_t select_addr;
930 uint32_t rsvd_0;
931 uint32_t data_size;
932 uint32_t op_count;
933 uint32_t select_value;
934 uint32_t select_value_stride;
935 uint32_t read_addr;
936 uint32_t rsvd_1;
937};
938
939/* Queue entry */
940struct qla82xx_minidump_entry_queue {
941 struct qla82xx_minidump_entry_hdr h;
942 uint32_t select_addr;
943 struct {
944 uint16_t queue_id_stride;
945 uint16_t rsvd_0;
946 } q_strd;
947 uint32_t data_size;
948 uint32_t op_count;
949 uint32_t rsvd_1;
950 uint32_t rsvd_2;
951 uint32_t read_addr;
952 struct {
953 uint8_t read_addr_stride;
954 uint8_t read_addr_cnt;
955 uint16_t rsvd_3;
956 } rd_strd;
957};
958
959#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
960#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
961#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
962#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
963#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
964#define QLA82XX_MINIDUMP_MEM_SIZE 0
965#define QLA82XX_MAX_ENTRY_HDR 4
966
967struct qla82xx_minidump {
968 uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
969 uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
970 uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
971 uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
972 uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
973 uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
974};
975
976#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
977#define RQST_TMPLT_SIZE 0x0
978#define RQST_TMPLT 0x1
979#define MD_DIRECT_ROM_WINDOW 0x42110030
980#define MD_DIRECT_ROM_READ_BASE 0x42150000
981#define MD_MIU_TEST_AGT_CTRL 0x41000090
982#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
983#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
984
985static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
986 0x410000AC, 0x410000B8, 0x410000BC };
795#endif 987#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ee47820c30a6..cd15678f9ada 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
68 " Maximum queue depth to report for target devices.\n" 68 " Maximum queue depth to report for target devices.\n"
69 "\t\t Default: 32."); 69 "\t\t Default: 32.");
70 70
71static int ql4xqfulltracking = 1;
72module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
73MODULE_PARM_DESC(ql4xqfulltracking,
74 " Enable or disable dynamic tracking and adjustment of\n"
75 "\t\t scsi device queue depth.\n"
76 "\t\t 0 - Disable.\n"
77 "\t\t 1 - Enable. (Default)");
78
71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 79static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
72module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 80module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
73MODULE_PARM_DESC(ql4xsess_recovery_tmo, 81MODULE_PARM_DESC(ql4xsess_recovery_tmo,
74 " Target Session Recovery Timeout.\n" 82 " Target Session Recovery Timeout.\n"
75 "\t\t Default: 120 sec."); 83 "\t\t Default: 120 sec.");
76 84
85int ql4xmdcapmask = 0x1F;
86module_param(ql4xmdcapmask, int, S_IRUGO);
87MODULE_PARM_DESC(ql4xmdcapmask,
88 " Set the Minidump driver capture mask level.\n"
89 "\t\t Default is 0x1F.\n"
90 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
91
92int ql4xenablemd = 1;
93module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
94MODULE_PARM_DESC(ql4xenablemd,
95 " Set to enable minidump.\n"
96 "\t\t 0 - disable minidump\n"
97 "\t\t 1 - enable minidump (Default)");
98
77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 99static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
78/* 100/*
79 * SCSI host template entry points 101 * SCSI host template entry points
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
140static void qla4xxx_slave_destroy(struct scsi_device *sdev); 162static void qla4xxx_slave_destroy(struct scsi_device *sdev);
141static umode_t ql4_attr_is_visible(int param_type, int param); 163static umode_t ql4_attr_is_visible(int param_type, int param);
142static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 164static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
165static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
166 int reason);
143 167
144static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 168static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
145 QLA82XX_LEGACY_INTR_CONFIG; 169 QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
159 .slave_configure = qla4xxx_slave_configure, 183 .slave_configure = qla4xxx_slave_configure,
160 .slave_alloc = qla4xxx_slave_alloc, 184 .slave_alloc = qla4xxx_slave_alloc,
161 .slave_destroy = qla4xxx_slave_destroy, 185 .slave_destroy = qla4xxx_slave_destroy,
186 .change_queue_depth = qla4xxx_change_queue_depth,
162 187
163 .this_id = -1, 188 .this_id = -1,
164 .cmd_per_lun = 3, 189 .cmd_per_lun = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1555 struct iscsi_session *sess; 1580 struct iscsi_session *sess;
1556 struct ddb_entry *ddb_entry; 1581 struct ddb_entry *ddb_entry;
1557 struct scsi_qla_host *ha; 1582 struct scsi_qla_host *ha;
1558 unsigned long flags; 1583 unsigned long flags, wtime;
1584 struct dev_db_entry *fw_ddb_entry = NULL;
1585 dma_addr_t fw_ddb_entry_dma;
1586 uint32_t ddb_state;
1587 int ret;
1559 1588
1560 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1589 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1561 sess = cls_sess->dd_data; 1590 sess = cls_sess->dd_data;
1562 ddb_entry = sess->dd_data; 1591 ddb_entry = sess->dd_data;
1563 ha = ddb_entry->ha; 1592 ha = ddb_entry->ha;
1564 1593
1594 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1595 &fw_ddb_entry_dma, GFP_KERNEL);
1596 if (!fw_ddb_entry) {
1597 ql4_printk(KERN_ERR, ha,
1598 "%s: Unable to allocate dma buffer\n", __func__);
1599 goto destroy_session;
1600 }
1601
1602 wtime = jiffies + (HZ * LOGOUT_TOV);
1603 do {
1604 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1605 fw_ddb_entry, fw_ddb_entry_dma,
1606 NULL, NULL, &ddb_state, NULL,
1607 NULL, NULL);
1608 if (ret == QLA_ERROR)
1609 goto destroy_session;
1610
1611 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1612 (ddb_state == DDB_DS_SESSION_FAILED))
1613 goto destroy_session;
1614
1615 schedule_timeout_uninterruptible(HZ);
1616 } while ((time_after(wtime, jiffies)));
1617
1618destroy_session:
1565 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 1619 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1566 1620
1567 spin_lock_irqsave(&ha->hardware_lock, flags); 1621 spin_lock_irqsave(&ha->hardware_lock, flags);
1568 qla4xxx_free_ddb(ha, ddb_entry); 1622 qla4xxx_free_ddb(ha, ddb_entry);
1569 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1624
1570 iscsi_session_teardown(cls_sess); 1625 iscsi_session_teardown(cls_sess);
1626
1627 if (fw_ddb_entry)
1628 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1629 fw_ddb_entry, fw_ddb_entry_dma);
1571} 1630}
1572 1631
1573static struct iscsi_cls_conn * 1632static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2220 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 2279 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2221 ha->queues_dma); 2280 ha->queues_dma);
2222 2281
2282 if (ha->fw_dump)
2283 vfree(ha->fw_dump);
2284
2223 ha->queues_len = 0; 2285 ha->queues_len = 0;
2224 ha->queues = NULL; 2286 ha->queues = NULL;
2225 ha->queues_dma = 0; 2287 ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2229 ha->response_dma = 0; 2291 ha->response_dma = 0;
2230 ha->shadow_regs = NULL; 2292 ha->shadow_regs = NULL;
2231 ha->shadow_regs_dma = 0; 2293 ha->shadow_regs_dma = 0;
2294 ha->fw_dump = NULL;
2295 ha->fw_dump_size = 0;
2232 2296
2233 /* Free srb pool. */ 2297 /* Free srb pool. */
2234 if (ha->srb_mempool) 2298 if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5023 5087
5024 set_bit(AF_INIT_DONE, &ha->flags); 5088 set_bit(AF_INIT_DONE, &ha->flags);
5025 5089
5090 qla4_8xxx_alloc_sysfs_attr(ha);
5091
5026 printk(KERN_INFO 5092 printk(KERN_INFO
5027 " QLogic iSCSI HBA Driver version: %s\n" 5093 " QLogic iSCSI HBA Driver version: %s\n"
5028 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 5094 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5149 iscsi_boot_destroy_kset(ha->boot_kset); 5215 iscsi_boot_destroy_kset(ha->boot_kset);
5150 5216
5151 qla4xxx_destroy_fw_ddb_session(ha); 5217 qla4xxx_destroy_fw_ddb_session(ha);
5218 qla4_8xxx_free_sysfs_attr(ha);
5152 5219
5153 scsi_remove_host(ha->host); 5220 scsi_remove_host(ha->host);
5154 5221
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5217 scsi_deactivate_tcq(sdev, 1); 5284 scsi_deactivate_tcq(sdev, 1);
5218} 5285}
5219 5286
5287static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
5288 int reason)
5289{
5290 if (!ql4xqfulltracking)
5291 return -EOPNOTSUPP;
5292
5293 return iscsi_change_queue_depth(sdev, qdepth, reason);
5294}
5295
5220/** 5296/**
5221 * qla4xxx_del_from_active_array - returns an active srb 5297 * qla4xxx_del_from_active_array - returns an active srb
5222 * @ha: Pointer to host adapter structure. 5298 * @ha: Pointer to host adapter structure.
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 97b30c108e36..cc1cc3518b87 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k16" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61c82a345f82..bbbc9c918d4c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level;
90EXPORT_SYMBOL(scsi_logging_level); 90EXPORT_SYMBOL(scsi_logging_level);
91#endif 91#endif
92 92
93#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD) 93/* sd, scsi core and power management need to coordinate flushing async actions */
94/* sd and scsi_pm need to coordinate flushing async actions */
95LIST_HEAD(scsi_sd_probe_domain); 94LIST_HEAD(scsi_sd_probe_domain);
96EXPORT_SYMBOL(scsi_sd_probe_domain); 95EXPORT_SYMBOL(scsi_sd_probe_domain);
97#endif
98 96
99/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 97/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
100 * You may not alter any existing entry (although adding new ones is 98 * You may not alter any existing entry (although adding new ones is
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ddfd31d4ce..6dfb9785d345 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
1378{ 1378{
1379 struct scsi_device *sdev = q->queuedata; 1379 struct scsi_device *sdev = q->queuedata;
1380 struct Scsi_Host *shost; 1380 struct Scsi_Host *shost;
1381 struct scsi_target *starget;
1382 1381
1383 if (!sdev) 1382 if (!sdev)
1384 return 0; 1383 return 0;
1385 1384
1386 shost = sdev->host; 1385 shost = sdev->host;
1387 starget = scsi_target(sdev);
1388 1386
1389 if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || 1387 /*
1390 scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) 1388 * Ignore host/starget busy state.
1389 * Since block layer does not have a concept of fairness across
1390 * multiple queues, congestion of host/starget needs to be handled
1391 * in SCSI layer.
1392 */
1393 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1391 return 1; 1394 return 1;
1392 1395
1393 return 0; 1396 return 0;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index f661a41fa4c6..d4201ded3b22 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
24 err = scsi_device_quiesce(to_scsi_device(dev)); 24 err = scsi_device_quiesce(to_scsi_device(dev));
25 if (err == 0) { 25 if (err == 0) {
26 drv = dev->driver; 26 drv = dev->driver;
27 if (drv && drv->suspend) 27 if (drv && drv->suspend) {
28 err = drv->suspend(dev, msg); 28 err = drv->suspend(dev, msg);
29 if (err)
30 scsi_device_resume(to_scsi_device(dev));
31 }
29 } 32 }
30 dev_dbg(dev, "scsi suspend: %d\n", err); 33 dev_dbg(dev, "scsi suspend: %d\n", err);
31 return err; 34 return err;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 01b03744f1f9..2e5fe584aad3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
147 147
148 do { 148 do {
149 if (list_empty(&scanning_hosts)) 149 if (list_empty(&scanning_hosts))
150 return 0; 150 goto out;
151 /* If we can't get memory immediately, that's OK. Just 151 /* If we can't get memory immediately, that's OK. Just
152 * sleep a little. Even if we never get memory, the async 152 * sleep a little. Even if we never get memory, the async
153 * scans will finish eventually. 153 * scans will finish eventually.
@@ -179,8 +179,11 @@ int scsi_complete_async_scans(void)
179 } 179 }
180 done: 180 done:
181 spin_unlock(&async_scan_lock); 181 spin_unlock(&async_scan_lock);
182
183 kfree(data); 182 kfree(data);
183
184 out:
185 async_synchronize_full_domain(&scsi_sd_probe_domain);
186
184 return 0; 187 return 0;
185} 188}
186 189
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 74708fcaf82f..ae7814874618 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <scsi/scsi_scan.h> 15#include "scsi_priv.h"
16 16
17static int __init wait_scan_init(void) 17static int __init wait_scan_init(void)
18{ 18{
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4e010b727818..6a4fd00117ca 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1836 err = pci_request_regions(pdev, UFSHCD); 1836 err = pci_request_regions(pdev, UFSHCD);
1837 if (err < 0) { 1837 if (err < 0) {
1838 dev_err(&pdev->dev, "request regions failed\n"); 1838 dev_err(&pdev->dev, "request regions failed\n");
1839 goto out_disable; 1839 goto out_host_put;
1840 } 1840 }
1841 1841
1842 hba->mmio_base = pci_ioremap_bar(pdev, 0); 1842 hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
1925 iounmap(hba->mmio_base); 1925 iounmap(hba->mmio_base);
1926out_release_regions: 1926out_release_regions:
1927 pci_release_regions(pdev); 1927 pci_release_regions(pdev);
1928out_disable: 1928out_host_put:
1929 scsi_host_put(host); 1929 scsi_host_put(host);
1930out_disable:
1930 pci_clear_master(pdev); 1931 pci_clear_master(pdev);
1931 pci_disable_device(pdev); 1932 pci_disable_device(pdev);
1932out_error: 1933out_error:
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 46ef5fe51db5..0c73dd4f43a0 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -801,7 +801,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
801 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 801 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
802 802
803 if (!cs) { 803 if (!cs) {
804 cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL); 804 cs = kzalloc(sizeof *cs, GFP_KERNEL);
805 if (!cs) 805 if (!cs)
806 return -ENOMEM; 806 return -ENOMEM;
807 cs->base = mcspi->base + spi->chip_select * 0x14; 807 cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -842,6 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
842 cs = spi->controller_state; 842 cs = spi->controller_state;
843 list_del(&cs->node); 843 list_del(&cs->node);
844 844
845 kfree(cs);
845 } 846 }
846 847
847 if (spi->chip_select < spi->master->num_chipselect) { 848 if (spi->chip_select < spi->master->num_chipselect) {
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 1c3d6386ea36..aeac1caba3f9 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -30,6 +30,7 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/usb.h> 31#include <linux/usb.h>
32#include <linux/errno.h> 32#include <linux/errno.h>
33#include <linux/kconfig.h>
33#include <linux/kernel.h> 34#include <linux/kernel.h>
34#include <linux/sched.h> 35#include <linux/sched.h>
35#include <linux/fcntl.h> 36#include <linux/fcntl.h>
@@ -981,6 +982,8 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
981} 982}
982EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister); 983EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
983 984
985#if IS_ENABLED(CONFIG_USB)
986
984static int comedi_old_usb_auto_config(struct usb_interface *intf, 987static int comedi_old_usb_auto_config(struct usb_interface *intf,
985 struct comedi_driver *driver) 988 struct comedi_driver *driver)
986{ 989{
@@ -1043,3 +1046,5 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
1043 comedi_driver_unregister(comedi_driver); 1046 comedi_driver_unregister(comedi_driver);
1044} 1047}
1045EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister); 1048EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
1049
1050#endif
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 292af0f7f451..51665132c61b 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -104,7 +104,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
104 104
105void netlink_exit(struct sock *sock) 105void netlink_exit(struct sock *sock)
106{ 106{
107 sock_release(sock->sk_socket); 107 netlink_kernel_release(sock);
108} 108}
109 109
110int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) 110int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 0338c7cd0a8b..f03fbd3bb454 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -29,8 +29,6 @@ Then fill in the following:
29 * info->driver_module: 29 * info->driver_module:
30 Set to THIS_MODULE. Used to ensure correct ownership 30 Set to THIS_MODULE. Used to ensure correct ownership
31 of various resources allocate by the core. 31 of various resources allocate by the core.
32 * info->num_interrupt_lines:
33 Number of event triggering hardware lines the device has.
34 * info->event_attrs: 32 * info->event_attrs:
35 Attributes used to enable / disable hardware events. 33 Attributes used to enable / disable hardware events.
36 * info->attrs: 34 * info->attrs:
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 2490dd25093b..8f1b3af02f29 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -13,6 +13,7 @@ config AD7291
13config AD7298 13config AD7298
14 tristate "Analog Devices AD7298 ADC driver" 14 tristate "Analog Devices AD7298 ADC driver"
15 depends on SPI 15 depends on SPI
16 select IIO_KFIFO_BUF if IIO_BUFFER
16 help 17 help
17 Say yes here to build support for Analog Devices AD7298 18 Say yes here to build support for Analog Devices AD7298
18 8 Channel ADC with temperature sensor. 19 8 Channel ADC with temperature sensor.
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 10ab6dc823b9..a13afff2dfe6 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -235,7 +235,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
235 .indexed = 1, \ 235 .indexed = 1, \
236 .channel = num, \ 236 .channel = num, \
237 .address = num, \ 237 .address = num, \
238 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, \ 238 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
239 IIO_CHAN_INFO_SCALE_SHARED_BIT, \
239 .scan_index = num, \ 240 .scan_index = num, \
240 .scan_type = IIO_ST('s', 16, 16, 0), \ 241 .scan_type = IIO_ST('s', 16, 16, 0), \
241 } 242 }
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 11acd4c35ed2..8c6ed3b0c6f6 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -208,7 +208,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
208 */ 208 */
209 ret = omap_gem_get_paddr(fbdev->bo, &paddr, true); 209 ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
210 if (ret) { 210 if (ret) {
211 dev_err(dev->dev, "could not map (paddr)!\n"); 211 dev_err(dev->dev,
212 "could not map (paddr)! Skipping framebuffer alloc\n");
212 ret = -ENOMEM; 213 ret = -ENOMEM;
213 goto fail; 214 goto fail;
214 } 215 }
@@ -388,8 +389,11 @@ void omap_fbdev_free(struct drm_device *dev)
388 389
389 fbi = helper->fbdev; 390 fbi = helper->fbdev;
390 391
391 unregister_framebuffer(fbi); 392 /* only cleanup framebuffer if it is present */
392 framebuffer_release(fbi); 393 if (fbi) {
394 unregister_framebuffer(fbi);
395 framebuffer_release(fbi);
396 }
393 397
394 drm_fb_helper_fini(helper); 398 drm_fb_helper_fini(helper);
395 399
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 4e7ef0e6b79c..d46764b5aaba 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
3002 return oid; 3002 return oid;
3003} 3003}
3004 3004
3005static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, 3005static int zcache_frontswap_store(unsigned type, pgoff_t offset,
3006 struct page *page) 3006 struct page *page)
3007{ 3007{
3008 u64 ind64 = (u64)offset; 3008 u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
3025 3025
3026/* returns 0 if the page was successfully gotten from frontswap, -1 if 3026/* returns 0 if the page was successfully gotten from frontswap, -1 if
3027 * was not present (should never happen!) */ 3027 * was not present (should never happen!) */
3028static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, 3028static int zcache_frontswap_load(unsigned type, pgoff_t offset,
3029 struct page *page) 3029 struct page *page)
3030{ 3030{
3031 u64 ind64 = (u64)offset; 3031 u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored)
3080} 3080}
3081 3081
3082static struct frontswap_ops zcache_frontswap_ops = { 3082static struct frontswap_ops zcache_frontswap_ops = {
3083 .put_page = zcache_frontswap_put_page, 3083 .store = zcache_frontswap_store,
3084 .get_page = zcache_frontswap_get_page, 3084 .load = zcache_frontswap_load,
3085 .invalidate_page = zcache_frontswap_flush_page, 3085 .invalidate_page = zcache_frontswap_flush_page,
3086 .invalidate_area = zcache_frontswap_flush_area, 3086 .invalidate_area = zcache_frontswap_flush_area,
3087 .init = zcache_frontswap_init 3087 .init = zcache_frontswap_init
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bd18e2d0513..69f616c6964e 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
102 /* - */ 102 /* - */
103 {USB_DEVICE(0x20F4, 0x646B)}, 103 {USB_DEVICE(0x20F4, 0x646B)},
104 {USB_DEVICE(0x083A, 0xC512)}, 104 {USB_DEVICE(0x083A, 0xC512)},
105 {USB_DEVICE(0x25D4, 0x4CA1)},
106 {USB_DEVICE(0x25D4, 0x4CAB)},
105 107
106/* RTL8191SU */ 108/* RTL8191SU */
107 /* Realtek */ 109 /* Realtek */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 2734dacacbaf..784c796b9848 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1;
1835 * Swizzling increases objects per swaptype, increasing tmem concurrency 1835 * Swizzling increases objects per swaptype, increasing tmem concurrency
1836 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS 1836 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
1837 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from 1837 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
1838 * frontswap_get_page(), but has side-effects. Hence using 8. 1838 * frontswap_load(), but has side-effects. Hence using 8.
1839 */ 1839 */
1840#define SWIZ_BITS 8 1840#define SWIZ_BITS 8
1841#define SWIZ_MASK ((1 << SWIZ_BITS) - 1) 1841#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
1849 return oid; 1849 return oid;
1850} 1850}
1851 1851
1852static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, 1852static int zcache_frontswap_store(unsigned type, pgoff_t offset,
1853 struct page *page) 1853 struct page *page)
1854{ 1854{
1855 u64 ind64 = (u64)offset; 1855 u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
1870 1870
1871/* returns 0 if the page was successfully gotten from frontswap, -1 if 1871/* returns 0 if the page was successfully gotten from frontswap, -1 if
1872 * was not present (should never happen!) */ 1872 * was not present (should never happen!) */
1873static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, 1873static int zcache_frontswap_load(unsigned type, pgoff_t offset,
1874 struct page *page) 1874 struct page *page)
1875{ 1875{
1876 u64 ind64 = (u64)offset; 1876 u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored)
1919} 1919}
1920 1920
1921static struct frontswap_ops zcache_frontswap_ops = { 1921static struct frontswap_ops zcache_frontswap_ops = {
1922 .put_page = zcache_frontswap_put_page, 1922 .store = zcache_frontswap_store,
1923 .get_page = zcache_frontswap_get_page, 1923 .load = zcache_frontswap_load,
1924 .invalidate_page = zcache_frontswap_flush_page, 1924 .invalidate_page = zcache_frontswap_flush_page,
1925 .invalidate_area = zcache_frontswap_flush_area, 1925 .invalidate_area = zcache_frontswap_flush_area,
1926 .init = zcache_frontswap_init 1926 .init = zcache_frontswap_init
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 37c609898f84..7e6136e2ce81 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -587,14 +587,14 @@ static void sbp_management_request_logout(
587{ 587{
588 struct sbp_tport *tport = agent->tport; 588 struct sbp_tport *tport = agent->tport;
589 struct sbp_tpg *tpg = tport->tpg; 589 struct sbp_tpg *tpg = tport->tpg;
590 int login_id; 590 int id;
591 struct sbp_login_descriptor *login; 591 struct sbp_login_descriptor *login;
592 592
593 login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); 593 id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
594 594
595 login = sbp_login_find_by_id(tpg, login_id); 595 login = sbp_login_find_by_id(tpg, id);
596 if (!login) { 596 if (!login) {
597 pr_warn("cannot find login: %d\n", login_id); 597 pr_warn("cannot find login: %d\n", id);
598 598
599 req->status.status = cpu_to_be32( 599 req->status.status = cpu_to_be32(
600 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 600 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index e624b836469c..91799973081a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
374 374
375out: 375out:
376 transport_kunmap_data_sg(cmd); 376 transport_kunmap_data_sg(cmd);
377 target_complete_cmd(cmd, GOOD); 377 if (!rc)
378 return 0; 378 target_complete_cmd(cmd, GOOD);
379 return rc;
379} 380}
380 381
381static inline int core_alua_state_nonoptimized( 382static inline int core_alua_state_nonoptimized(
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 686dba189f8e..9f99d0404908 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice(
133 ret = PTR_ERR(dev_p); 133 ret = PTR_ERR(dev_p);
134 goto fail; 134 goto fail;
135 } 135 }
136
137 /* O_DIRECT too? */
138 flags = O_RDWR | O_CREAT | O_LARGEFILE;
139
140 /* 136 /*
141 * If fd_buffered_io=1 has not been set explicitly (the default), 137 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
142 * use O_SYNC to force FILEIO writes to disk. 138 * of pure timestamp updates.
143 */ 139 */
144 if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) 140 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
145 flags |= O_SYNC;
146 141
147 file = filp_open(dev_p, flags, 0600); 142 file = filp_open(dev_p, flags, 0600);
148 if (IS_ERR(file)) { 143 if (IS_ERR(file)) {
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
380 } 375 }
381} 376}
382 377
383static void fd_emulate_write_fua(struct se_cmd *cmd)
384{
385 struct se_device *dev = cmd->se_dev;
386 struct fd_dev *fd_dev = dev->dev_ptr;
387 loff_t start = cmd->t_task_lba *
388 dev->se_sub_dev->se_dev_attrib.block_size;
389 loff_t end = start + cmd->data_length;
390 int ret;
391
392 pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
393 cmd->t_task_lba, cmd->data_length);
394
395 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
396 if (ret != 0)
397 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
398}
399
400static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 378static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
401 u32 sgl_nents, enum dma_data_direction data_direction) 379 u32 sgl_nents, enum dma_data_direction data_direction)
402{ 380{
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
411 ret = fd_do_readv(cmd, sgl, sgl_nents); 389 ret = fd_do_readv(cmd, sgl, sgl_nents);
412 } else { 390 } else {
413 ret = fd_do_writev(cmd, sgl, sgl_nents); 391 ret = fd_do_writev(cmd, sgl, sgl_nents);
414 392 /*
393 * Perform implict vfs_fsync_range() for fd_do_writev() ops
394 * for SCSI WRITEs with Forced Unit Access (FUA) set.
395 * Allow this to happen independent of WCE=0 setting.
396 */
415 if (ret > 0 && 397 if (ret > 0 &&
416 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
417 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 398 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
418 (cmd->se_cmd_flags & SCF_FUA)) { 399 (cmd->se_cmd_flags & SCF_FUA)) {
419 /* 400 struct fd_dev *fd_dev = dev->dev_ptr;
420 * We might need to be a bit smarter here 401 loff_t start = cmd->t_task_lba *
421 * and return some sense data to let the initiator 402 dev->se_sub_dev->se_dev_attrib.block_size;
422 * know the FUA WRITE cache sync failed..? 403 loff_t end = start + cmd->data_length;
423 */
424 fd_emulate_write_fua(cmd);
425 }
426 404
405 vfs_fsync_range(fd_dev->fd_file, start, end, 1);
406 }
427 } 407 }
428 408
429 if (ret < 0) { 409 if (ret < 0) {
@@ -442,7 +422,6 @@ enum {
442static match_table_t tokens = { 422static match_table_t tokens = {
443 {Opt_fd_dev_name, "fd_dev_name=%s"}, 423 {Opt_fd_dev_name, "fd_dev_name=%s"},
444 {Opt_fd_dev_size, "fd_dev_size=%s"}, 424 {Opt_fd_dev_size, "fd_dev_size=%s"},
445 {Opt_fd_buffered_io, "fd_buffered_io=%d"},
446 {Opt_err, NULL} 425 {Opt_err, NULL}
447}; 426};
448 427
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params(
454 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 433 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
455 char *orig, *ptr, *arg_p, *opts; 434 char *orig, *ptr, *arg_p, *opts;
456 substring_t args[MAX_OPT_ARGS]; 435 substring_t args[MAX_OPT_ARGS];
457 int ret = 0, arg, token; 436 int ret = 0, token;
458 437
459 opts = kstrdup(page, GFP_KERNEL); 438 opts = kstrdup(page, GFP_KERNEL);
460 if (!opts) 439 if (!opts)
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params(
498 " bytes\n", fd_dev->fd_dev_size); 477 " bytes\n", fd_dev->fd_dev_size);
499 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 478 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
500 break; 479 break;
501 case Opt_fd_buffered_io:
502 match_int(args, &arg);
503 if (arg != 1) {
504 pr_err("bogus fd_buffered_io=%d value\n", arg);
505 ret = -EINVAL;
506 goto out;
507 }
508
509 pr_debug("FILEIO: Using buffered I/O"
510 " operations for struct fd_dev\n");
511
512 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
513 break;
514 default: 480 default:
515 break; 481 break;
516 } 482 }
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params(
542 ssize_t bl = 0; 508 ssize_t bl = 0;
543 509
544 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 510 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
545 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", 511 bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
546 fd_dev->fd_dev_name, fd_dev->fd_dev_size, 512 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
547 (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
548 "Buffered" : "Synchronous");
549 return bl; 513 return bl;
550} 514}
551 515
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index fbd59ef7d8be..70ce7fd7111d 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,7 +14,6 @@
14 14
15#define FBDF_HAS_PATH 0x01 15#define FBDF_HAS_PATH 0x01
16#define FBDF_HAS_SIZE 0x02 16#define FBDF_HAS_SIZE 0x02
17#define FDBD_USE_BUFFERED_IO 0x04
18 17
19struct fd_dev { 18struct fd_dev {
20 u32 fbd_flags; 19 u32 fbd_flags;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b05fdc0c05d3..634d0f31a28c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -315,7 +315,7 @@ void transport_register_session(
315} 315}
316EXPORT_SYMBOL(transport_register_session); 316EXPORT_SYMBOL(transport_register_session);
317 317
318static void target_release_session(struct kref *kref) 318void target_release_session(struct kref *kref)
319{ 319{
320 struct se_session *se_sess = container_of(kref, 320 struct se_session *se_sess = container_of(kref,
321 struct se_session, sess_kref); 321 struct se_session, sess_kref);
@@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session);
332 332
333void target_put_session(struct se_session *se_sess) 333void target_put_session(struct se_session *se_sess)
334{ 334{
335 struct se_portal_group *tpg = se_sess->se_tpg;
336
337 if (tpg->se_tpg_tfo->put_session != NULL) {
338 tpg->se_tpg_tfo->put_session(se_sess);
339 return;
340 }
335 kref_put(&se_sess->sess_kref, target_release_session); 341 kref_put(&se_sess->sess_kref, target_release_session);
336} 342}
337EXPORT_SYMBOL(target_put_session); 343EXPORT_SYMBOL(target_put_session);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 35819e312624..6cc4358f68c1 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1033,7 +1033,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
1033 if (!retinfo) 1033 if (!retinfo)
1034 return -EFAULT; 1034 return -EFAULT;
1035 memset(&tmp, 0, sizeof(tmp)); 1035 memset(&tmp, 0, sizeof(tmp));
1036 tty_lock(tty); 1036 tty_lock();
1037 tmp.line = tty->index; 1037 tmp.line = tty->index;
1038 tmp.port = state->port; 1038 tmp.port = state->port;
1039 tmp.flags = state->tport.flags; 1039 tmp.flags = state->tport.flags;
@@ -1042,7 +1042,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
1042 tmp.close_delay = state->tport.close_delay; 1042 tmp.close_delay = state->tport.close_delay;
1043 tmp.closing_wait = state->tport.closing_wait; 1043 tmp.closing_wait = state->tport.closing_wait;
1044 tmp.custom_divisor = state->custom_divisor; 1044 tmp.custom_divisor = state->custom_divisor;
1045 tty_unlock(tty); 1045 tty_unlock();
1046 if (copy_to_user(retinfo,&tmp,sizeof(*retinfo))) 1046 if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
1047 return -EFAULT; 1047 return -EFAULT;
1048 return 0; 1048 return 0;
@@ -1059,12 +1059,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
1059 if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) 1059 if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
1060 return -EFAULT; 1060 return -EFAULT;
1061 1061
1062 tty_lock(tty); 1062 tty_lock();
1063 change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) || 1063 change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
1064 new_serial.custom_divisor != state->custom_divisor; 1064 new_serial.custom_divisor != state->custom_divisor;
1065 if (new_serial.irq || new_serial.port != state->port || 1065 if (new_serial.irq || new_serial.port != state->port ||
1066 new_serial.xmit_fifo_size != state->xmit_fifo_size) { 1066 new_serial.xmit_fifo_size != state->xmit_fifo_size) {
1067 tty_unlock(tty); 1067 tty_unlock();
1068 return -EINVAL; 1068 return -EINVAL;
1069 } 1069 }
1070 1070
@@ -1074,7 +1074,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
1074 (new_serial.xmit_fifo_size != state->xmit_fifo_size) || 1074 (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
1075 ((new_serial.flags & ~ASYNC_USR_MASK) != 1075 ((new_serial.flags & ~ASYNC_USR_MASK) !=
1076 (port->flags & ~ASYNC_USR_MASK))) { 1076 (port->flags & ~ASYNC_USR_MASK))) {
1077 tty_unlock(tty); 1077 tty_unlock();
1078 return -EPERM; 1078 return -EPERM;
1079 } 1079 }
1080 port->flags = ((port->flags & ~ASYNC_USR_MASK) | 1080 port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1084,7 +1084,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
1084 } 1084 }
1085 1085
1086 if (new_serial.baud_base < 9600) { 1086 if (new_serial.baud_base < 9600) {
1087 tty_unlock(tty); 1087 tty_unlock();
1088 return -EINVAL; 1088 return -EINVAL;
1089 } 1089 }
1090 1090
@@ -1116,7 +1116,7 @@ check_and_exit:
1116 } 1116 }
1117 } else 1117 } else
1118 retval = startup(tty, state); 1118 retval = startup(tty, state);
1119 tty_unlock(tty); 1119 tty_unlock();
1120 return retval; 1120 return retval;
1121} 1121}
1122 1122
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 6984e1a2686a..e61cabdd69df 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1599,7 +1599,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
1599 * If the port is the middle of closing, bail out now 1599 * If the port is the middle of closing, bail out now
1600 */ 1600 */
1601 if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) { 1601 if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
1602 wait_event_interruptible_tty(tty, info->port.close_wait, 1602 wait_event_interruptible_tty(info->port.close_wait,
1603 !(info->port.flags & ASYNC_CLOSING)); 1603 !(info->port.flags & ASYNC_CLOSING));
1604 return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS; 1604 return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
1605 } 1605 }
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index d3d91dae065c..944eaeb8e0cf 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -214,24 +214,24 @@ static int xen_hvm_console_init(void)
214 /* already configured */ 214 /* already configured */
215 if (info->intf != NULL) 215 if (info->intf != NULL)
216 return 0; 216 return 0;
217 217 /*
218 * If the toolstack (or the hypervisor) hasn't set these values, the
219 * default value is 0. Even though mfn = 0 and evtchn = 0 are
220 * theoretically correct values, in practice they never are and they
221 * mean that a legacy toolstack hasn't initialized the pv console correctly.
222 */
218 r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); 223 r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
219 if (r < 0) { 224 if (r < 0 || v == 0)
220 kfree(info); 225 goto err;
221 return -ENODEV;
222 }
223 info->evtchn = v; 226 info->evtchn = v;
224 hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); 227 v = 0;
225 if (r < 0) { 228 r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
226 kfree(info); 229 if (r < 0 || v == 0)
227 return -ENODEV; 230 goto err;
228 }
229 mfn = v; 231 mfn = v;
230 info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); 232 info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
231 if (info->intf == NULL) { 233 if (info->intf == NULL)
232 kfree(info); 234 goto err;
233 return -ENODEV;
234 }
235 info->vtermno = HVC_COOKIE; 235 info->vtermno = HVC_COOKIE;
236 236
237 spin_lock(&xencons_lock); 237 spin_lock(&xencons_lock);
@@ -239,6 +239,9 @@ static int xen_hvm_console_init(void)
239 spin_unlock(&xencons_lock); 239 spin_unlock(&xencons_lock);
240 240
241 return 0; 241 return 0;
242err:
243 kfree(info);
244 return -ENODEV;
242} 245}
243 246
244static int xen_pv_console_init(void) 247static int xen_pv_console_init(void)
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 656ad93bbc96..5c6c31459a2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1065,8 +1065,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1065 1065
1066 TRACE_L("read()"); 1066 TRACE_L("read()");
1067 1067
1068 /* FIXME: should use a private lock */ 1068 tty_lock();
1069 tty_lock(tty);
1070 1069
1071 pClient = findClient(pInfo, task_pid(current)); 1070 pClient = findClient(pInfo, task_pid(current));
1072 if (pClient) { 1071 if (pClient) {
@@ -1078,7 +1077,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1078 goto unlock; 1077 goto unlock;
1079 } 1078 }
1080 /* block until there is a message: */ 1079 /* block until there is a message: */
1081 wait_event_interruptible_tty(tty, pInfo->read_wait, 1080 wait_event_interruptible_tty(pInfo->read_wait,
1082 (pMsg = remove_msg(pInfo, pClient))); 1081 (pMsg = remove_msg(pInfo, pClient)));
1083 } 1082 }
1084 1083
@@ -1108,7 +1107,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1108 } 1107 }
1109 ret = -EPERM; 1108 ret = -EPERM;
1110unlock: 1109unlock:
1111 tty_unlock(tty); 1110 tty_unlock();
1112 return ret; 1111 return ret;
1113} 1112}
1114 1113
@@ -1157,7 +1156,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
1157 pHeader->locks = 0; 1156 pHeader->locks = 0;
1158 pHeader->owner = NULL; 1157 pHeader->owner = NULL;
1159 1158
1160 tty_lock(tty); 1159 tty_lock();
1161 1160
1162 pClient = findClient(pInfo, task_pid(current)); 1161 pClient = findClient(pInfo, task_pid(current));
1163 if (pClient) { 1162 if (pClient) {
@@ -1176,7 +1175,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
1176 add_tx_queue(pInfo, pHeader); 1175 add_tx_queue(pInfo, pHeader);
1177 trigger_transmit(pInfo); 1176 trigger_transmit(pInfo);
1178 1177
1179 tty_unlock(tty); 1178 tty_unlock();
1180 1179
1181 return 0; 1180 return 0;
1182} 1181}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 65c7c62c7aae..5505ffc91da4 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -47,7 +47,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
47 wake_up_interruptible(&tty->read_wait); 47 wake_up_interruptible(&tty->read_wait);
48 wake_up_interruptible(&tty->write_wait); 48 wake_up_interruptible(&tty->write_wait);
49 tty->packet = 0; 49 tty->packet = 0;
50 /* Review - krefs on tty_link ?? */
51 if (!tty->link) 50 if (!tty->link)
52 return; 51 return;
53 tty->link->packet = 0; 52 tty->link->packet = 0;
@@ -63,9 +62,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
63 mutex_unlock(&devpts_mutex); 62 mutex_unlock(&devpts_mutex);
64 } 63 }
65#endif 64#endif
66 tty_unlock(tty); 65 tty_unlock();
67 tty_vhangup(tty->link); 66 tty_vhangup(tty->link);
68 tty_lock(tty); 67 tty_lock();
69 } 68 }
70} 69}
71 70
@@ -623,27 +622,26 @@ static int ptmx_open(struct inode *inode, struct file *filp)
623 return retval; 622 return retval;
624 623
625 /* find a device that is not in use. */ 624 /* find a device that is not in use. */
626 mutex_lock(&devpts_mutex); 625 tty_lock();
627 index = devpts_new_index(inode); 626 index = devpts_new_index(inode);
627 tty_unlock();
628 if (index < 0) { 628 if (index < 0) {
629 retval = index; 629 retval = index;
630 goto err_file; 630 goto err_file;
631 } 631 }
632 632
633 mutex_unlock(&devpts_mutex);
634
635 mutex_lock(&tty_mutex); 633 mutex_lock(&tty_mutex);
634 mutex_lock(&devpts_mutex);
636 tty = tty_init_dev(ptm_driver, index); 635 tty = tty_init_dev(ptm_driver, index);
636 mutex_unlock(&devpts_mutex);
637 tty_lock();
638 mutex_unlock(&tty_mutex);
637 639
638 if (IS_ERR(tty)) { 640 if (IS_ERR(tty)) {
639 retval = PTR_ERR(tty); 641 retval = PTR_ERR(tty);
640 goto out; 642 goto out;
641 } 643 }
642 644
643 /* The tty returned here is locked so we can safely
644 drop the mutex */
645 mutex_unlock(&tty_mutex);
646
647 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 645 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
648 646
649 tty_add_file(tty, filp); 647 tty_add_file(tty, filp);
@@ -656,17 +654,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
656 if (retval) 654 if (retval)
657 goto err_release; 655 goto err_release;
658 656
659 tty_unlock(tty); 657 tty_unlock();
660 return 0; 658 return 0;
661err_release: 659err_release:
662 tty_unlock(tty); 660 tty_unlock();
663 tty_release(inode, filp); 661 tty_release(inode, filp);
664 return retval; 662 return retval;
665out: 663out:
666 mutex_unlock(&tty_mutex);
667 devpts_kill_index(inode, index); 664 devpts_kill_index(inode, index);
665 tty_unlock();
668err_file: 666err_file:
669 mutex_unlock(&devpts_mutex);
670 tty_free_file(filp); 667 tty_free_file(filp);
671 return retval; 668 return retval;
672} 669}
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 47d061b9ad4d..6e1958a325bd 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
3113 3113
3114/** 3114/**
3115 * serial8250_register_8250_port - register a serial port 3115 * serial8250_register_8250_port - register a serial port
3116 * @port: serial port template 3116 * @up: serial port template
3117 * 3117 *
3118 * Configure the serial port specified by the request. If the 3118 * Configure the serial port specified by the request. If the
3119 * port exists and is in use, it is hung up and unregistered 3119 * port exists and is in use, it is hung up and unregistered
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4ad721fb8405..c17923ec6e95 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -133,6 +133,10 @@ struct pl011_dmatx_data {
133struct uart_amba_port { 133struct uart_amba_port {
134 struct uart_port port; 134 struct uart_port port;
135 struct clk *clk; 135 struct clk *clk;
136 /* Two optional pin states - default & sleep */
137 struct pinctrl *pinctrl;
138 struct pinctrl_state *pins_default;
139 struct pinctrl_state *pins_sleep;
136 const struct vendor_data *vendor; 140 const struct vendor_data *vendor;
137 unsigned int dmacr; /* dma control reg */ 141 unsigned int dmacr; /* dma control reg */
138 unsigned int im; /* interrupt mask */ 142 unsigned int im; /* interrupt mask */
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port)
1312 unsigned int cr; 1316 unsigned int cr;
1313 int retval; 1317 int retval;
1314 1318
1319 /* Optionaly enable pins to be muxed in and configured */
1320 if (!IS_ERR(uap->pins_default)) {
1321 retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
1322 if (retval)
1323 dev_err(port->dev,
1324 "could not set default pins\n");
1325 }
1326
1315 retval = clk_prepare(uap->clk); 1327 retval = clk_prepare(uap->clk);
1316 if (retval) 1328 if (retval)
1317 goto out; 1329 goto out;
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port)
1420{ 1432{
1421 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1433 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1422 unsigned int cr; 1434 unsigned int cr;
1435 int retval;
1423 1436
1424 /* 1437 /*
1425 * disable all interrupts 1438 * disable all interrupts
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port)
1462 */ 1475 */
1463 clk_disable(uap->clk); 1476 clk_disable(uap->clk);
1464 clk_unprepare(uap->clk); 1477 clk_unprepare(uap->clk);
1478 /* Optionally let pins go into sleep states */
1479 if (!IS_ERR(uap->pins_sleep)) {
1480 retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
1481 if (retval)
1482 dev_err(port->dev,
1483 "could not set pins to sleep state\n");
1484 }
1485
1465 1486
1466 if (uap->port.dev->platform_data) { 1487 if (uap->port.dev->platform_data) {
1467 struct amba_pl011_data *plat; 1488 struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
1792 if (!uap) 1813 if (!uap)
1793 return -ENODEV; 1814 return -ENODEV;
1794 1815
1816 /* Allow pins to be muxed in and configured */
1817 if (!IS_ERR(uap->pins_default)) {
1818 ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
1819 if (ret)
1820 dev_err(uap->port.dev,
1821 "could not set default pins\n");
1822 }
1823
1795 ret = clk_prepare(uap->clk); 1824 ret = clk_prepare(uap->clk);
1796 if (ret) 1825 if (ret)
1797 return ret; 1826 return ret;
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1844{ 1873{
1845 struct uart_amba_port *uap; 1874 struct uart_amba_port *uap;
1846 struct vendor_data *vendor = id->data; 1875 struct vendor_data *vendor = id->data;
1847 struct pinctrl *pinctrl;
1848 void __iomem *base; 1876 void __iomem *base;
1849 int i, ret; 1877 int i, ret;
1850 1878
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1869 goto free; 1897 goto free;
1870 } 1898 }
1871 1899
1872 pinctrl = devm_pinctrl_get_select_default(&dev->dev); 1900 uap->pinctrl = devm_pinctrl_get(&dev->dev);
1873 if (IS_ERR(pinctrl)) { 1901 if (IS_ERR(uap->pinctrl)) {
1874 ret = PTR_ERR(pinctrl); 1902 ret = PTR_ERR(uap->pinctrl);
1875 goto unmap; 1903 goto unmap;
1876 } 1904 }
1905 uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
1906 PINCTRL_STATE_DEFAULT);
1907 if (IS_ERR(uap->pins_default))
1908 dev_err(&dev->dev, "could not get default pinstate\n");
1909
1910 uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
1911 PINCTRL_STATE_SLEEP);
1912 if (IS_ERR(uap->pins_sleep))
1913 dev_dbg(&dev->dev, "could not get sleep pinstate\n");
1877 1914
1878 uap->clk = clk_get(&dev->dev, NULL); 1915 uap->clk = clk_get(&dev->dev, NULL);
1879 if (IS_ERR(uap->clk)) { 1916 if (IS_ERR(uap->clk)) {
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 7264d4d26717..80b6b1b1f725 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3976,7 +3976,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
3976 */ 3976 */
3977 if (tty_hung_up_p(filp) || 3977 if (tty_hung_up_p(filp) ||
3978 (info->flags & ASYNC_CLOSING)) { 3978 (info->flags & ASYNC_CLOSING)) {
3979 wait_event_interruptible_tty(tty, info->close_wait, 3979 wait_event_interruptible_tty(info->close_wait,
3980 !(info->flags & ASYNC_CLOSING)); 3980 !(info->flags & ASYNC_CLOSING));
3981#ifdef SERIAL_DO_RESTART 3981#ifdef SERIAL_DO_RESTART
3982 if (info->flags & ASYNC_HUP_NOTIFY) 3982 if (info->flags & ASYNC_HUP_NOTIFY)
@@ -4052,9 +4052,9 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
4052 printk("block_til_ready blocking: ttyS%d, count = %d\n", 4052 printk("block_til_ready blocking: ttyS%d, count = %d\n",
4053 info->line, info->count); 4053 info->line, info->count);
4054#endif 4054#endif
4055 tty_unlock(tty); 4055 tty_unlock();
4056 schedule(); 4056 schedule();
4057 tty_lock(tty); 4057 tty_lock();
4058 } 4058 }
4059 set_current_state(TASK_RUNNING); 4059 set_current_state(TASK_RUNNING);
4060 remove_wait_queue(&info->open_wait, &wait); 4060 remove_wait_queue(&info->open_wait, &wait);
@@ -4115,7 +4115,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
4115 */ 4115 */
4116 if (tty_hung_up_p(filp) || 4116 if (tty_hung_up_p(filp) ||
4117 (info->flags & ASYNC_CLOSING)) { 4117 (info->flags & ASYNC_CLOSING)) {
4118 wait_event_interruptible_tty(tty, info->close_wait, 4118 wait_event_interruptible_tty(info->close_wait,
4119 !(info->flags & ASYNC_CLOSING)); 4119 !(info->flags & ASYNC_CLOSING));
4120#ifdef SERIAL_DO_RESTART 4120#ifdef SERIAL_DO_RESTART
4121 return ((info->flags & ASYNC_HUP_NOTIFY) ? 4121 return ((info->flags & ASYNC_HUP_NOTIFY) ?
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 34bd345da775..6ae2a58d62f2 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
466 spin_unlock_irqrestore(&up->port.lock, flags); 466 spin_unlock_irqrestore(&up->port.lock, flags);
467} 467}
468 468
469#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL) 469#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
470/* 470/*
471 * Wait for transmitter & holding register to empty 471 * Wait for transmitter & holding register to empty
472 */ 472 */
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4604153b7954..1bd9163bc118 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev,
2179 return 0; 2179 return 0;
2180} 2180}
2181 2181
2182static void sci_cleanup_single(struct sci_port *port)
2183{
2184 sci_free_gpios(port);
2185
2186 clk_put(port->iclk);
2187 clk_put(port->fclk);
2188
2189 pm_runtime_disable(port->port.dev);
2190}
2191
2182#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 2192#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2183static void serial_console_putchar(struct uart_port *port, int ch) 2193static void serial_console_putchar(struct uart_port *port, int ch)
2184{ 2194{
@@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev)
2360 cpufreq_unregister_notifier(&port->freq_transition, 2370 cpufreq_unregister_notifier(&port->freq_transition,
2361 CPUFREQ_TRANSITION_NOTIFIER); 2371 CPUFREQ_TRANSITION_NOTIFIER);
2362 2372
2363 sci_free_gpios(port);
2364
2365 uart_remove_one_port(&sci_uart_driver, &port->port); 2373 uart_remove_one_port(&sci_uart_driver, &port->port);
2366 2374
2367 clk_put(port->iclk); 2375 sci_cleanup_single(port);
2368 clk_put(port->fclk);
2369 2376
2370 pm_runtime_disable(&dev->dev);
2371 return 0; 2377 return 0;
2372} 2378}
2373 2379
@@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev,
2385 index+1, SCI_NPORTS); 2391 index+1, SCI_NPORTS);
2386 dev_notice(&dev->dev, "Consider bumping " 2392 dev_notice(&dev->dev, "Consider bumping "
2387 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); 2393 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
2388 return 0; 2394 return -EINVAL;
2389 } 2395 }
2390 2396
2391 ret = sci_init_single(dev, sciport, index, p); 2397 ret = sci_init_single(dev, sciport, index, p);
2392 if (ret) 2398 if (ret)
2393 return ret; 2399 return ret;
2394 2400
2395 return uart_add_one_port(&sci_uart_driver, &sciport->port); 2401 ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
2402 if (ret) {
2403 sci_cleanup_single(sciport);
2404 return ret;
2405 }
2406
2407 return 0;
2396} 2408}
2397 2409
2398static int __devinit sci_probe(struct platform_device *dev) 2410static int __devinit sci_probe(struct platform_device *dev)
@@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev)
2413 2425
2414 ret = sci_probe_single(dev, dev->id, p, sp); 2426 ret = sci_probe_single(dev, dev->id, p, sp);
2415 if (ret) 2427 if (ret)
2416 goto err_unreg; 2428 return ret;
2417 2429
2418 sp->freq_transition.notifier_call = sci_notifier; 2430 sp->freq_transition.notifier_call = sci_notifier;
2419 2431
2420 ret = cpufreq_register_notifier(&sp->freq_transition, 2432 ret = cpufreq_register_notifier(&sp->freq_transition,
2421 CPUFREQ_TRANSITION_NOTIFIER); 2433 CPUFREQ_TRANSITION_NOTIFIER);
2422 if (unlikely(ret < 0)) 2434 if (unlikely(ret < 0)) {
2423 goto err_unreg; 2435 sci_cleanup_single(sp);
2436 return ret;
2437 }
2424 2438
2425#ifdef CONFIG_SH_STANDARD_BIOS 2439#ifdef CONFIG_SH_STANDARD_BIOS
2426 sh_bios_gdb_detach(); 2440 sh_bios_gdb_detach();
2427#endif 2441#endif
2428 2442
2429 return 0; 2443 return 0;
2430
2431err_unreg:
2432 sci_remove(dev);
2433 return ret;
2434} 2444}
2435 2445
2436static int sci_suspend(struct device *dev) 2446static int sci_suspend(struct device *dev)
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 5ed0daae6564..593d40ad0a6b 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -3338,9 +3338,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
3338 printk("%s(%d):block_til_ready blocking on %s count=%d\n", 3338 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3339 __FILE__,__LINE__, tty->driver->name, port->count ); 3339 __FILE__,__LINE__, tty->driver->name, port->count );
3340 3340
3341 tty_unlock(tty); 3341 tty_unlock();
3342 schedule(); 3342 schedule();
3343 tty_lock(tty); 3343 tty_lock();
3344 } 3344 }
3345 3345
3346 set_current_state(TASK_RUNNING); 3346 set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 45b43f11ca39..aa1debf97cc7 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -3336,9 +3336,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
3336 } 3336 }
3337 3337
3338 DBGINFO(("%s block_til_ready wait\n", tty->driver->name)); 3338 DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
3339 tty_unlock(tty); 3339 tty_unlock();
3340 schedule(); 3340 schedule();
3341 tty_lock(tty); 3341 tty_lock();
3342 } 3342 }
3343 3343
3344 set_current_state(TASK_RUNNING); 3344 set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 4a1e4f07765b..a3dddc12d2fe 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3357,9 +3357,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
3357 printk("%s(%d):%s block_til_ready() count=%d\n", 3357 printk("%s(%d):%s block_til_ready() count=%d\n",
3358 __FILE__,__LINE__, tty->driver->name, port->count ); 3358 __FILE__,__LINE__, tty->driver->name, port->count );
3359 3359
3360 tty_unlock(tty); 3360 tty_unlock();
3361 schedule(); 3361 schedule();
3362 tty_lock(tty); 3362 tty_lock();
3363 } 3363 }
3364 3364
3365 set_current_state(TASK_RUNNING); 3365 set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9e930c009bf2..b425c79675ad 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -185,7 +185,6 @@ void free_tty_struct(struct tty_struct *tty)
185 put_device(tty->dev); 185 put_device(tty->dev);
186 kfree(tty->write_buf); 186 kfree(tty->write_buf);
187 tty_buffer_free_all(tty); 187 tty_buffer_free_all(tty);
188 tty->magic = 0xDEADDEAD;
189 kfree(tty); 188 kfree(tty);
190} 189}
191 190
@@ -574,7 +573,7 @@ void __tty_hangup(struct tty_struct *tty)
574 } 573 }
575 spin_unlock(&redirect_lock); 574 spin_unlock(&redirect_lock);
576 575
577 tty_lock(tty); 576 tty_lock();
578 577
579 /* some functions below drop BTM, so we need this bit */ 578 /* some functions below drop BTM, so we need this bit */
580 set_bit(TTY_HUPPING, &tty->flags); 579 set_bit(TTY_HUPPING, &tty->flags);
@@ -667,7 +666,7 @@ void __tty_hangup(struct tty_struct *tty)
667 clear_bit(TTY_HUPPING, &tty->flags); 666 clear_bit(TTY_HUPPING, &tty->flags);
668 tty_ldisc_enable(tty); 667 tty_ldisc_enable(tty);
669 668
670 tty_unlock(tty); 669 tty_unlock();
671 670
672 if (f) 671 if (f)
673 fput(f); 672 fput(f);
@@ -1104,12 +1103,12 @@ void tty_write_message(struct tty_struct *tty, char *msg)
1104{ 1103{
1105 if (tty) { 1104 if (tty) {
1106 mutex_lock(&tty->atomic_write_lock); 1105 mutex_lock(&tty->atomic_write_lock);
1107 tty_lock(tty); 1106 tty_lock();
1108 if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) { 1107 if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
1109 tty_unlock(tty); 1108 tty_unlock();
1110 tty->ops->write(tty, msg, strlen(msg)); 1109 tty->ops->write(tty, msg, strlen(msg));
1111 } else 1110 } else
1112 tty_unlock(tty); 1111 tty_unlock();
1113 tty_write_unlock(tty); 1112 tty_write_unlock(tty);
1114 } 1113 }
1115 return; 1114 return;
@@ -1404,7 +1403,6 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1404 } 1403 }
1405 initialize_tty_struct(tty, driver, idx); 1404 initialize_tty_struct(tty, driver, idx);
1406 1405
1407 tty_lock(tty);
1408 retval = tty_driver_install_tty(driver, tty); 1406 retval = tty_driver_install_tty(driver, tty);
1409 if (retval < 0) 1407 if (retval < 0)
1410 goto err_deinit_tty; 1408 goto err_deinit_tty;
@@ -1417,11 +1415,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1417 retval = tty_ldisc_setup(tty, tty->link); 1415 retval = tty_ldisc_setup(tty, tty->link);
1418 if (retval) 1416 if (retval)
1419 goto err_release_tty; 1417 goto err_release_tty;
1420 /* Return the tty locked so that it cannot vanish under the caller */
1421 return tty; 1418 return tty;
1422 1419
1423err_deinit_tty: 1420err_deinit_tty:
1424 tty_unlock(tty);
1425 deinitialize_tty_struct(tty); 1421 deinitialize_tty_struct(tty);
1426 free_tty_struct(tty); 1422 free_tty_struct(tty);
1427err_module_put: 1423err_module_put:
@@ -1430,7 +1426,6 @@ err_module_put:
1430 1426
1431 /* call the tty release_tty routine to clean out this slot */ 1427 /* call the tty release_tty routine to clean out this slot */
1432err_release_tty: 1428err_release_tty:
1433 tty_unlock(tty);
1434 printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, " 1429 printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, "
1435 "clearing slot %d\n", idx); 1430 "clearing slot %d\n", idx);
1436 release_tty(tty, idx); 1431 release_tty(tty, idx);
@@ -1633,7 +1628,7 @@ int tty_release(struct inode *inode, struct file *filp)
1633 if (tty_paranoia_check(tty, inode, __func__)) 1628 if (tty_paranoia_check(tty, inode, __func__))
1634 return 0; 1629 return 0;
1635 1630
1636 tty_lock(tty); 1631 tty_lock();
1637 check_tty_count(tty, __func__); 1632 check_tty_count(tty, __func__);
1638 1633
1639 __tty_fasync(-1, filp, 0); 1634 __tty_fasync(-1, filp, 0);
@@ -1642,11 +1637,10 @@ int tty_release(struct inode *inode, struct file *filp)
1642 pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY && 1637 pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
1643 tty->driver->subtype == PTY_TYPE_MASTER); 1638 tty->driver->subtype == PTY_TYPE_MASTER);
1644 devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0; 1639 devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
1645 /* Review: parallel close */
1646 o_tty = tty->link; 1640 o_tty = tty->link;
1647 1641
1648 if (tty_release_checks(tty, o_tty, idx)) { 1642 if (tty_release_checks(tty, o_tty, idx)) {
1649 tty_unlock(tty); 1643 tty_unlock();
1650 return 0; 1644 return 0;
1651 } 1645 }
1652 1646
@@ -1658,7 +1652,7 @@ int tty_release(struct inode *inode, struct file *filp)
1658 if (tty->ops->close) 1652 if (tty->ops->close)
1659 tty->ops->close(tty, filp); 1653 tty->ops->close(tty, filp);
1660 1654
1661 tty_unlock(tty); 1655 tty_unlock();
1662 /* 1656 /*
1663 * Sanity check: if tty->count is going to zero, there shouldn't be 1657 * Sanity check: if tty->count is going to zero, there shouldn't be
1664 * any waiters on tty->read_wait or tty->write_wait. We test the 1658 * any waiters on tty->read_wait or tty->write_wait. We test the
@@ -1681,7 +1675,7 @@ int tty_release(struct inode *inode, struct file *filp)
1681 opens on /dev/tty */ 1675 opens on /dev/tty */
1682 1676
1683 mutex_lock(&tty_mutex); 1677 mutex_lock(&tty_mutex);
1684 tty_lock_pair(tty, o_tty); 1678 tty_lock();
1685 tty_closing = tty->count <= 1; 1679 tty_closing = tty->count <= 1;
1686 o_tty_closing = o_tty && 1680 o_tty_closing = o_tty &&
1687 (o_tty->count <= (pty_master ? 1 : 0)); 1681 (o_tty->count <= (pty_master ? 1 : 0));
@@ -1712,7 +1706,7 @@ int tty_release(struct inode *inode, struct file *filp)
1712 1706
1713 printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", 1707 printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
1714 __func__, tty_name(tty, buf)); 1708 __func__, tty_name(tty, buf));
1715 tty_unlock_pair(tty, o_tty); 1709 tty_unlock();
1716 mutex_unlock(&tty_mutex); 1710 mutex_unlock(&tty_mutex);
1717 schedule(); 1711 schedule();
1718 } 1712 }
@@ -1775,7 +1769,7 @@ int tty_release(struct inode *inode, struct file *filp)
1775 1769
1776 /* check whether both sides are closing ... */ 1770 /* check whether both sides are closing ... */
1777 if (!tty_closing || (o_tty && !o_tty_closing)) { 1771 if (!tty_closing || (o_tty && !o_tty_closing)) {
1778 tty_unlock_pair(tty, o_tty); 1772 tty_unlock();
1779 return 0; 1773 return 0;
1780 } 1774 }
1781 1775
@@ -1788,16 +1782,14 @@ int tty_release(struct inode *inode, struct file *filp)
1788 tty_ldisc_release(tty, o_tty); 1782 tty_ldisc_release(tty, o_tty);
1789 /* 1783 /*
1790 * The release_tty function takes care of the details of clearing 1784 * The release_tty function takes care of the details of clearing
1791 * the slots and preserving the termios structure. The tty_unlock_pair 1785 * the slots and preserving the termios structure.
1792 * should be safe as we keep a kref while the tty is locked (so the
1793 * unlock never unlocks a freed tty).
1794 */ 1786 */
1795 release_tty(tty, idx); 1787 release_tty(tty, idx);
1796 tty_unlock_pair(tty, o_tty);
1797 1788
1798 /* Make this pty number available for reallocation */ 1789 /* Make this pty number available for reallocation */
1799 if (devpts) 1790 if (devpts)
1800 devpts_kill_index(inode, idx); 1791 devpts_kill_index(inode, idx);
1792 tty_unlock();
1801 return 0; 1793 return 0;
1802} 1794}
1803 1795
@@ -1901,9 +1893,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
1901 * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev. 1893 * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
1902 * tty->count should protect the rest. 1894 * tty->count should protect the rest.
1903 * ->siglock protects ->signal/->sighand 1895 * ->siglock protects ->signal/->sighand
1904 *
1905 * Note: the tty_unlock/lock cases without a ref are only safe due to
1906 * tty_mutex
1907 */ 1896 */
1908 1897
1909static int tty_open(struct inode *inode, struct file *filp) 1898static int tty_open(struct inode *inode, struct file *filp)
@@ -1927,7 +1916,8 @@ retry_open:
1927 retval = 0; 1916 retval = 0;
1928 1917
1929 mutex_lock(&tty_mutex); 1918 mutex_lock(&tty_mutex);
1930 /* This is protected by the tty_mutex */ 1919 tty_lock();
1920
1931 tty = tty_open_current_tty(device, filp); 1921 tty = tty_open_current_tty(device, filp);
1932 if (IS_ERR(tty)) { 1922 if (IS_ERR(tty)) {
1933 retval = PTR_ERR(tty); 1923 retval = PTR_ERR(tty);
@@ -1948,19 +1938,17 @@ retry_open:
1948 } 1938 }
1949 1939
1950 if (tty) { 1940 if (tty) {
1951 tty_lock(tty);
1952 retval = tty_reopen(tty); 1941 retval = tty_reopen(tty);
1953 if (retval < 0) { 1942 if (retval)
1954 tty_unlock(tty);
1955 tty = ERR_PTR(retval); 1943 tty = ERR_PTR(retval);
1956 } 1944 } else
1957 } else /* Returns with the tty_lock held for now */
1958 tty = tty_init_dev(driver, index); 1945 tty = tty_init_dev(driver, index);
1959 1946
1960 mutex_unlock(&tty_mutex); 1947 mutex_unlock(&tty_mutex);
1961 if (driver) 1948 if (driver)
1962 tty_driver_kref_put(driver); 1949 tty_driver_kref_put(driver);
1963 if (IS_ERR(tty)) { 1950 if (IS_ERR(tty)) {
1951 tty_unlock();
1964 retval = PTR_ERR(tty); 1952 retval = PTR_ERR(tty);
1965 goto err_file; 1953 goto err_file;
1966 } 1954 }
@@ -1989,7 +1977,7 @@ retry_open:
1989 printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__, 1977 printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
1990 retval, tty->name); 1978 retval, tty->name);
1991#endif 1979#endif
1992 tty_unlock(tty); /* need to call tty_release without BTM */ 1980 tty_unlock(); /* need to call tty_release without BTM */
1993 tty_release(inode, filp); 1981 tty_release(inode, filp);
1994 if (retval != -ERESTARTSYS) 1982 if (retval != -ERESTARTSYS)
1995 return retval; 1983 return retval;
@@ -2001,15 +1989,17 @@ retry_open:
2001 /* 1989 /*
2002 * Need to reset f_op in case a hangup happened. 1990 * Need to reset f_op in case a hangup happened.
2003 */ 1991 */
1992 tty_lock();
2004 if (filp->f_op == &hung_up_tty_fops) 1993 if (filp->f_op == &hung_up_tty_fops)
2005 filp->f_op = &tty_fops; 1994 filp->f_op = &tty_fops;
1995 tty_unlock();
2006 goto retry_open; 1996 goto retry_open;
2007 } 1997 }
2008 tty_unlock(tty); 1998 tty_unlock();
2009 1999
2010 2000
2011 mutex_lock(&tty_mutex); 2001 mutex_lock(&tty_mutex);
2012 tty_lock(tty); 2002 tty_lock();
2013 spin_lock_irq(&current->sighand->siglock); 2003 spin_lock_irq(&current->sighand->siglock);
2014 if (!noctty && 2004 if (!noctty &&
2015 current->signal->leader && 2005 current->signal->leader &&
@@ -2017,10 +2007,11 @@ retry_open:
2017 tty->session == NULL) 2007 tty->session == NULL)
2018 __proc_set_tty(current, tty); 2008 __proc_set_tty(current, tty);
2019 spin_unlock_irq(&current->sighand->siglock); 2009 spin_unlock_irq(&current->sighand->siglock);
2020 tty_unlock(tty); 2010 tty_unlock();
2021 mutex_unlock(&tty_mutex); 2011 mutex_unlock(&tty_mutex);
2022 return 0; 2012 return 0;
2023err_unlock: 2013err_unlock:
2014 tty_unlock();
2024 mutex_unlock(&tty_mutex); 2015 mutex_unlock(&tty_mutex);
2025 /* after locks to avoid deadlock */ 2016 /* after locks to avoid deadlock */
2026 if (!IS_ERR_OR_NULL(driver)) 2017 if (!IS_ERR_OR_NULL(driver))
@@ -2103,13 +2094,10 @@ out:
2103 2094
2104static int tty_fasync(int fd, struct file *filp, int on) 2095static int tty_fasync(int fd, struct file *filp, int on)
2105{ 2096{
2106 struct tty_struct *tty = file_tty(filp);
2107 int retval; 2097 int retval;
2108 2098 tty_lock();
2109 tty_lock(tty);
2110 retval = __tty_fasync(fd, filp, on); 2099 retval = __tty_fasync(fd, filp, on);
2111 tty_unlock(tty); 2100 tty_unlock();
2112
2113 return retval; 2101 return retval;
2114} 2102}
2115 2103
@@ -2946,7 +2934,6 @@ void initialize_tty_struct(struct tty_struct *tty,
2946 tty->pgrp = NULL; 2934 tty->pgrp = NULL;
2947 tty->overrun_time = jiffies; 2935 tty->overrun_time = jiffies;
2948 tty_buffer_init(tty); 2936 tty_buffer_init(tty);
2949 mutex_init(&tty->legacy_mutex);
2950 mutex_init(&tty->termios_mutex); 2937 mutex_init(&tty->termios_mutex);
2951 mutex_init(&tty->ldisc_mutex); 2938 mutex_init(&tty->ldisc_mutex);
2952 init_waitqueue_head(&tty->write_wait); 2939 init_waitqueue_head(&tty->write_wait);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index ba8be396a621..9911eb6b34cd 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -568,7 +568,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
568 if (IS_ERR(new_ldisc)) 568 if (IS_ERR(new_ldisc))
569 return PTR_ERR(new_ldisc); 569 return PTR_ERR(new_ldisc);
570 570
571 tty_lock(tty); 571 tty_lock();
572 /* 572 /*
573 * We need to look at the tty locking here for pty/tty pairs 573 * We need to look at the tty locking here for pty/tty pairs
574 * when both sides try to change in parallel. 574 * when both sides try to change in parallel.
@@ -582,12 +582,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
582 */ 582 */
583 583
584 if (tty->ldisc->ops->num == ldisc) { 584 if (tty->ldisc->ops->num == ldisc) {
585 tty_unlock(tty); 585 tty_unlock();
586 tty_ldisc_put(new_ldisc); 586 tty_ldisc_put(new_ldisc);
587 return 0; 587 return 0;
588 } 588 }
589 589
590 tty_unlock(tty); 590 tty_unlock();
591 /* 591 /*
592 * Problem: What do we do if this blocks ? 592 * Problem: What do we do if this blocks ?
593 * We could deadlock here 593 * We could deadlock here
@@ -595,7 +595,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
595 595
596 tty_wait_until_sent(tty, 0); 596 tty_wait_until_sent(tty, 0);
597 597
598 tty_lock(tty); 598 tty_lock();
599 mutex_lock(&tty->ldisc_mutex); 599 mutex_lock(&tty->ldisc_mutex);
600 600
601 /* 601 /*
@@ -605,10 +605,10 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
605 605
606 while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) { 606 while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
607 mutex_unlock(&tty->ldisc_mutex); 607 mutex_unlock(&tty->ldisc_mutex);
608 tty_unlock(tty); 608 tty_unlock();
609 wait_event(tty_ldisc_wait, 609 wait_event(tty_ldisc_wait,
610 test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0); 610 test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
611 tty_lock(tty); 611 tty_lock();
612 mutex_lock(&tty->ldisc_mutex); 612 mutex_lock(&tty->ldisc_mutex);
613 } 613 }
614 614
@@ -623,7 +623,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
623 623
624 o_ldisc = tty->ldisc; 624 o_ldisc = tty->ldisc;
625 625
626 tty_unlock(tty); 626 tty_unlock();
627 /* 627 /*
628 * Make sure we don't change while someone holds a 628 * Make sure we don't change while someone holds a
629 * reference to the line discipline. The TTY_LDISC bit 629 * reference to the line discipline. The TTY_LDISC bit
@@ -650,7 +650,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
650 650
651 retval = tty_ldisc_wait_idle(tty, 5 * HZ); 651 retval = tty_ldisc_wait_idle(tty, 5 * HZ);
652 652
653 tty_lock(tty); 653 tty_lock();
654 mutex_lock(&tty->ldisc_mutex); 654 mutex_lock(&tty->ldisc_mutex);
655 655
656 /* handle wait idle failure locked */ 656 /* handle wait idle failure locked */
@@ -665,7 +665,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
665 clear_bit(TTY_LDISC_CHANGING, &tty->flags); 665 clear_bit(TTY_LDISC_CHANGING, &tty->flags);
666 mutex_unlock(&tty->ldisc_mutex); 666 mutex_unlock(&tty->ldisc_mutex);
667 tty_ldisc_put(new_ldisc); 667 tty_ldisc_put(new_ldisc);
668 tty_unlock(tty); 668 tty_unlock();
669 return -EIO; 669 return -EIO;
670 } 670 }
671 671
@@ -708,7 +708,7 @@ enable:
708 if (o_work) 708 if (o_work)
709 schedule_work(&o_tty->buf.work); 709 schedule_work(&o_tty->buf.work);
710 mutex_unlock(&tty->ldisc_mutex); 710 mutex_unlock(&tty->ldisc_mutex);
711 tty_unlock(tty); 711 tty_unlock();
712 return retval; 712 return retval;
713} 713}
714 714
@@ -816,11 +816,11 @@ void tty_ldisc_hangup(struct tty_struct *tty)
816 * need to wait for another function taking the BTM 816 * need to wait for another function taking the BTM
817 */ 817 */
818 clear_bit(TTY_LDISC, &tty->flags); 818 clear_bit(TTY_LDISC, &tty->flags);
819 tty_unlock(tty); 819 tty_unlock();
820 cancel_work_sync(&tty->buf.work); 820 cancel_work_sync(&tty->buf.work);
821 mutex_unlock(&tty->ldisc_mutex); 821 mutex_unlock(&tty->ldisc_mutex);
822retry: 822retry:
823 tty_lock(tty); 823 tty_lock();
824 mutex_lock(&tty->ldisc_mutex); 824 mutex_lock(&tty->ldisc_mutex);
825 825
826 /* At this point we have a closed ldisc and we want to 826 /* At this point we have a closed ldisc and we want to
@@ -831,7 +831,7 @@ retry:
831 if (atomic_read(&tty->ldisc->users) != 1) { 831 if (atomic_read(&tty->ldisc->users) != 1) {
832 char cur_n[TASK_COMM_LEN], tty_n[64]; 832 char cur_n[TASK_COMM_LEN], tty_n[64];
833 long timeout = 3 * HZ; 833 long timeout = 3 * HZ;
834 tty_unlock(tty); 834 tty_unlock();
835 835
836 while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) { 836 while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
837 timeout = MAX_SCHEDULE_TIMEOUT; 837 timeout = MAX_SCHEDULE_TIMEOUT;
@@ -894,23 +894,6 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
894 tty_ldisc_enable(tty); 894 tty_ldisc_enable(tty);
895 return 0; 895 return 0;
896} 896}
897
898static void tty_ldisc_kill(struct tty_struct *tty)
899{
900 mutex_lock(&tty->ldisc_mutex);
901 /*
902 * Now kill off the ldisc
903 */
904 tty_ldisc_close(tty, tty->ldisc);
905 tty_ldisc_put(tty->ldisc);
906 /* Force an oops if we mess this up */
907 tty->ldisc = NULL;
908
909 /* Ensure the next open requests the N_TTY ldisc */
910 tty_set_termios_ldisc(tty, N_TTY);
911 mutex_unlock(&tty->ldisc_mutex);
912}
913
914/** 897/**
915 * tty_ldisc_release - release line discipline 898 * tty_ldisc_release - release line discipline
916 * @tty: tty being shut down 899 * @tty: tty being shut down
@@ -929,19 +912,27 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
929 * race with the set_ldisc code path. 912 * race with the set_ldisc code path.
930 */ 913 */
931 914
932 tty_unlock_pair(tty, o_tty); 915 tty_unlock();
933 tty_ldisc_halt(tty); 916 tty_ldisc_halt(tty);
934 tty_ldisc_flush_works(tty); 917 tty_ldisc_flush_works(tty);
935 if (o_tty) { 918 tty_lock();
936 tty_ldisc_halt(o_tty);
937 tty_ldisc_flush_works(o_tty);
938 }
939 tty_lock_pair(tty, o_tty);
940 919
920 mutex_lock(&tty->ldisc_mutex);
921 /*
922 * Now kill off the ldisc
923 */
924 tty_ldisc_close(tty, tty->ldisc);
925 tty_ldisc_put(tty->ldisc);
926 /* Force an oops if we mess this up */
927 tty->ldisc = NULL;
928
929 /* Ensure the next open requests the N_TTY ldisc */
930 tty_set_termios_ldisc(tty, N_TTY);
931 mutex_unlock(&tty->ldisc_mutex);
941 932
942 tty_ldisc_kill(tty); 933 /* This will need doing differently if we need to lock */
943 if (o_tty) 934 if (o_tty)
944 tty_ldisc_kill(o_tty); 935 tty_ldisc_release(o_tty, NULL);
945 936
946 /* And the memory resources remaining (buffers, termios) will be 937 /* And the memory resources remaining (buffers, termios) will be
947 disposed of when the kref hits zero */ 938 disposed of when the kref hits zero */
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 69adc80c98cd..9ff986c32a21 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -4,59 +4,29 @@
4#include <linux/semaphore.h> 4#include <linux/semaphore.h>
5#include <linux/sched.h> 5#include <linux/sched.h>
6 6
7/* Legacy tty mutex glue */ 7/*
8 * The 'big tty mutex'
9 *
10 * This mutex is taken and released by tty_lock() and tty_unlock(),
11 * replacing the older big kernel lock.
12 * It can no longer be taken recursively, and does not get
13 * released implicitly while sleeping.
14 *
15 * Don't use in new code.
16 */
17static DEFINE_MUTEX(big_tty_mutex);
8 18
9/* 19/*
10 * Getting the big tty mutex. 20 * Getting the big tty mutex.
11 */ 21 */
12 22void __lockfunc tty_lock(void)
13void __lockfunc tty_lock(struct tty_struct *tty)
14{ 23{
15 if (tty->magic != TTY_MAGIC) { 24 mutex_lock(&big_tty_mutex);
16 printk(KERN_ERR "L Bad %p\n", tty);
17 WARN_ON(1);
18 return;
19 }
20 tty_kref_get(tty);
21 mutex_lock(&tty->legacy_mutex);
22} 25}
23EXPORT_SYMBOL(tty_lock); 26EXPORT_SYMBOL(tty_lock);
24 27
25void __lockfunc tty_unlock(struct tty_struct *tty) 28void __lockfunc tty_unlock(void)
26{ 29{
27 if (tty->magic != TTY_MAGIC) { 30 mutex_unlock(&big_tty_mutex);
28 printk(KERN_ERR "U Bad %p\n", tty);
29 WARN_ON(1);
30 return;
31 }
32 mutex_unlock(&tty->legacy_mutex);
33 tty_kref_put(tty);
34} 31}
35EXPORT_SYMBOL(tty_unlock); 32EXPORT_SYMBOL(tty_unlock);
36
37/*
38 * Getting the big tty mutex for a pair of ttys with lock ordering
39 * On a non pty/tty pair tty2 can be NULL which is just fine.
40 */
41void __lockfunc tty_lock_pair(struct tty_struct *tty,
42 struct tty_struct *tty2)
43{
44 if (tty < tty2) {
45 tty_lock(tty);
46 tty_lock(tty2);
47 } else {
48 if (tty2 && tty2 != tty)
49 tty_lock(tty2);
50 tty_lock(tty);
51 }
52}
53EXPORT_SYMBOL(tty_lock_pair);
54
55void __lockfunc tty_unlock_pair(struct tty_struct *tty,
56 struct tty_struct *tty2)
57{
58 tty_unlock(tty);
59 if (tty2 && tty2 != tty)
60 tty_unlock(tty2);
61}
62EXPORT_SYMBOL(tty_unlock_pair);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index d9cca95a5452..bf6e238146ae 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -230,7 +230,7 @@ int tty_port_block_til_ready(struct tty_port *port,
230 230
231 /* block if port is in the process of being closed */ 231 /* block if port is in the process of being closed */
232 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { 232 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
233 wait_event_interruptible_tty(tty, port->close_wait, 233 wait_event_interruptible_tty(port->close_wait,
234 !(port->flags & ASYNC_CLOSING)); 234 !(port->flags & ASYNC_CLOSING));
235 if (port->flags & ASYNC_HUP_NOTIFY) 235 if (port->flags & ASYNC_HUP_NOTIFY)
236 return -EAGAIN; 236 return -EAGAIN;
@@ -296,9 +296,9 @@ int tty_port_block_til_ready(struct tty_port *port,
296 retval = -ERESTARTSYS; 296 retval = -ERESTARTSYS;
297 break; 297 break;
298 } 298 }
299 tty_unlock(tty); 299 tty_unlock();
300 schedule(); 300 schedule();
301 tty_lock(tty); 301 tty_lock();
302 } 302 }
303 finish_wait(&port->open_wait, &wait); 303 finish_wait(&port->open_wait, &wait);
304 304
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f2a120eea9d4..36a2a0b7b82c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
567 567
568 usb_autopm_put_interface(acm->control); 568 usb_autopm_put_interface(acm->control);
569 569
570 /*
571 * Unthrottle device in case the TTY was closed while throttled.
572 */
573 spin_lock_irq(&acm->read_lock);
574 acm->throttled = 0;
575 acm->throttle_req = 0;
576 spin_unlock_irq(&acm->read_lock);
577
570 if (acm_submit_read_urbs(acm, GFP_KERNEL)) 578 if (acm_submit_read_urbs(acm, GFP_KERNEL))
571 goto error_submit_read_urbs; 579 goto error_submit_read_urbs;
572 580
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index ea8b304f0e85..8fd398dffced 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = {
55 .bInterfaceSubClass = 1, 55 .bInterfaceSubClass = 1,
56 .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */ 56 .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
57 }, 57 },
58 {
59 /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
60 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
61 USB_DEVICE_ID_MATCH_INT_INFO,
62 .idVendor = HUAWEI_VENDOR_ID,
63 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
64 .bInterfaceSubClass = 1,
65 .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */
66 },
58 { } 67 { }
59}; 68};
60 69
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 57ed9e400c06..622b4a48e732 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev)
493 493
494 pci_save_state(pci_dev); 494 pci_save_state(pci_dev);
495 495
496 /*
497 * Some systems crash if an EHCI controller is in D3 during
498 * a sleep transition. We have to leave such controllers in D0.
499 */
500 if (hcd->broken_pci_sleep) {
501 dev_dbg(dev, "Staying in PCI D0\n");
502 return retval;
503 }
504
505 /* If the root hub is dead rather than suspended, disallow remote 496 /* If the root hub is dead rather than suspended, disallow remote
506 * wakeup. usb_hc_died() should ensure that both hosts are marked as 497 * wakeup. usb_hc_died() should ensure that both hosts are marked as
507 * dying, so we only need to check the primary roothub. 498 * dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 04fb834c3fa1..25a7422ee657 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3379,7 +3379,7 @@ int usb_disable_lpm(struct usb_device *udev)
3379 return 0; 3379 return 0;
3380 3380
3381 udev->lpm_disable_count++; 3381 udev->lpm_disable_count++;
3382 if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0)) 3382 if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
3383 return 0; 3383 return 0;
3384 3384
3385 /* If LPM is enabled, attempt to disable it. */ 3385 /* If LPM is enabled, attempt to disable it. */
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b548cf1dbc62..bdd1c6749d88 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1838,7 +1838,6 @@ free_interfaces:
1838 intfc = cp->intf_cache[i]; 1838 intfc = cp->intf_cache[i];
1839 intf->altsetting = intfc->altsetting; 1839 intf->altsetting = intfc->altsetting;
1840 intf->num_altsetting = intfc->num_altsetting; 1840 intf->num_altsetting = intfc->num_altsetting;
1841 intf->intf_assoc = find_iad(dev, cp, i);
1842 kref_get(&intfc->ref); 1841 kref_get(&intfc->ref);
1843 1842
1844 alt = usb_altnum_to_altsetting(intf, 0); 1843 alt = usb_altnum_to_altsetting(intf, 0);
@@ -1851,6 +1850,8 @@ free_interfaces:
1851 if (!alt) 1850 if (!alt)
1852 alt = &intf->altsetting[0]; 1851 alt = &intf->altsetting[0];
1853 1852
1853 intf->intf_assoc =
1854 find_iad(dev, cp, alt->desc.bInterfaceNumber);
1854 intf->cur_altsetting = alt; 1855 intf->cur_altsetting = alt;
1855 usb_enable_interface(dev, intf, true); 1856 usb_enable_interface(dev, intf, true);
1856 intf->dev.parent = &dev->dev; 1857 intf->dev.parent = &dev->dev;
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index e23bf7984aaf..9a9bced813ed 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
599 599
600 spin_lock_irqsave(&ep->udc->lock, flags); 600 spin_lock_irqsave(&ep->udc->lock, flags);
601 601
602 if (ep->ep.desc) {
603 spin_unlock_irqrestore(&ep->udc->lock, flags);
604 DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
605 return -EBUSY;
606 }
607
608 ep->ep.desc = desc; 602 ep->ep.desc = desc;
609 ep->ep.maxpacket = maxpacket; 603 ep->ep.maxpacket = maxpacket;
610 604
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 51881f3bd07a..b09452d6f33a 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep,
1596 ep = container_of(_ep, struct qe_ep, ep); 1596 ep = container_of(_ep, struct qe_ep, ep);
1597 1597
1598 /* catch various bogus parameters */ 1598 /* catch various bogus parameters */
1599 if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] || 1599 if (!_ep || !desc || _ep->name == ep_name[0] ||
1600 (desc->bDescriptorType != USB_DT_ENDPOINT)) 1600 (desc->bDescriptorType != USB_DT_ENDPOINT))
1601 return -EINVAL; 1601 return -EINVAL;
1602 1602
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 28316858208b..bc6f9bb9994a 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
567 ep = container_of(_ep, struct fsl_ep, ep); 567 ep = container_of(_ep, struct fsl_ep, ep);
568 568
569 /* catch various bogus parameters */ 569 /* catch various bogus parameters */
570 if (!_ep || !desc || ep->ep.desc 570 if (!_ep || !desc
571 || (desc->bDescriptorType != USB_DT_ENDPOINT)) 571 || (desc->bDescriptorType != USB_DT_ENDPOINT))
572 return -EINVAL; 572 return -EINVAL;
573 573
@@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2575 /* for ep0: the desc defined here; 2575 /* for ep0: the desc defined here;
2576 * for other eps, gadget layer called ep_enable with defined desc 2576 * for other eps, gadget layer called ep_enable with defined desc
2577 */ 2577 */
2578 udc_controller->eps[0].desc = &fsl_ep0_desc; 2578 udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
2579 udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD; 2579 udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
2580 2580
2581 /* setup the udc->eps[] for non-control endpoints and link 2581 /* setup the udc->eps[] for non-control endpoints and link
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 5cd7b7e7ddb4..f61a967f7082 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
568/* 568/*
569 * ### internal used help routines. 569 * ### internal used help routines.
570 */ 570 */
571#define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF) 571#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF)
572#define ep_maxpacket(EP) ((EP)->ep.maxpacket) 572#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
573#define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \ 573#define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
574 USB_DIR_IN ):((EP)->desc->bEndpointAddress \ 574 USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
575 & USB_DIR_IN)==USB_DIR_IN) 575 & USB_DIR_IN)==USB_DIR_IN)
576#define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \ 576#define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \
577 &udc->eps[pipe]) 577 &udc->eps[pipe])
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index b241e6c6a7f2..3d28fb976c78 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
102 unsigned long flags; 102 unsigned long flags;
103 103
104 ep = container_of(_ep, struct goku_ep, ep); 104 ep = container_of(_ep, struct goku_ep, ep);
105 if (!_ep || !desc || ep->ep.desc 105 if (!_ep || !desc
106 || desc->bDescriptorType != USB_DT_ENDPOINT) 106 || desc->bDescriptorType != USB_DT_ENDPOINT)
107 return -EINVAL; 107 return -EINVAL;
108 dev = ep->dev; 108 dev = ep->dev;
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index dbcd1329495e..117a4bba1b8c 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
464 ep = container_of(_ep, struct mv_ep, ep); 464 ep = container_of(_ep, struct mv_ep, ep);
465 udc = ep->udc; 465 udc = ep->udc;
466 466
467 if (!_ep || !desc || ep->ep.desc 467 if (!_ep || !desc
468 || desc->bDescriptorType != USB_DT_ENDPOINT) 468 || desc->bDescriptorType != USB_DT_ENDPOINT)
469 return -EINVAL; 469 return -EINVAL;
470 470
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 7ba32469c5bd..a460e8c204f4 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -153,7 +153,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
153 u16 maxp; 153 u16 maxp;
154 154
155 /* catch various bogus parameters */ 155 /* catch various bogus parameters */
156 if (!_ep || !desc || ep->ep.desc 156 if (!_ep || !desc
157 || desc->bDescriptorType != USB_DT_ENDPOINT 157 || desc->bDescriptorType != USB_DT_ENDPOINT
158 || ep->bEndpointAddress != desc->bEndpointAddress 158 || ep->bEndpointAddress != desc->bEndpointAddress
159 || ep->maxpacket < usb_endpoint_maxp(desc)) { 159 || ep->maxpacket < usb_endpoint_maxp(desc)) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index d7c8cb3bf759..f7ff9e8e746a 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
218 struct pxa25x_udc *dev; 218 struct pxa25x_udc *dev;
219 219
220 ep = container_of (_ep, struct pxa25x_ep, ep); 220 ep = container_of (_ep, struct pxa25x_ep, ep);
221 if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name 221 if (!_ep || !desc || _ep->name == ep0name
222 || desc->bDescriptorType != USB_DT_ENDPOINT 222 || desc->bDescriptorType != USB_DT_ENDPOINT
223 || ep->bEndpointAddress != desc->bEndpointAddress 223 || ep->bEndpointAddress != desc->bEndpointAddress
224 || ep->fifo_size < usb_endpoint_maxp (desc)) { 224 || ep->fifo_size < usb_endpoint_maxp (desc)) {
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 36c6836eeb0f..236b271871a0 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
760 u32 ecr = 0; 760 u32 ecr = 0;
761 761
762 hsep = our_ep(_ep); 762 hsep = our_ep(_ep);
763 if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name 763 if (!_ep || !desc || _ep->name == ep0name
764 || desc->bDescriptorType != USB_DT_ENDPOINT 764 || desc->bDescriptorType != USB_DT_ENDPOINT
765 || hsep->bEndpointAddress != desc->bEndpointAddress 765 || hsep->bEndpointAddress != desc->bEndpointAddress
766 || ep_maxpacket(hsep) < usb_endpoint_maxp(desc)) 766 || ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 3de71d37d75e..f2e51f50e528 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
1062 1062
1063 ep = to_s3c2410_ep(_ep); 1063 ep = to_s3c2410_ep(_ep);
1064 1064
1065 if (!_ep || !desc || ep->ep.desc 1065 if (!_ep || !desc
1066 || _ep->name == ep0name 1066 || _ep->name == ep0name
1067 || desc->bDescriptorType != USB_DT_ENDPOINT) 1067 || desc->bDescriptorType != USB_DT_ENDPOINT)
1068 return -EINVAL; 1068 return -EINVAL;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b100f5f9f4b6..800be38c78b4 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd)
671 hw = ehci->async->hw; 671 hw = ehci->async->hw;
672 hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); 672 hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
673 hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); 673 hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
674#if defined(CONFIG_PPC_PS3)
674 hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ 675 hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
676#endif
675 hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); 677 hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
676 hw->hw_qtd_next = EHCI_LIST_END(ehci); 678 hw->hw_qtd_next = EHCI_LIST_END(ehci);
677 ehci->async->qh_state = QH_STATE_LINKED; 679 ehci->async->qh_state = QH_STATE_LINKED;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a44294d13494..17cfb8a1131c 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -43,6 +43,7 @@
43#include <linux/regulator/consumer.h> 43#include <linux/regulator/consumer.h>
44#include <linux/pm_runtime.h> 44#include <linux/pm_runtime.h>
45#include <linux/gpio.h> 45#include <linux/gpio.h>
46#include <linux/clk.h>
46 47
47/* EHCI Register Set */ 48/* EHCI Register Set */
48#define EHCI_INSNREG04 (0xA0) 49#define EHCI_INSNREG04 (0xA0)
@@ -55,6 +56,15 @@
55#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 56#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
56#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 57#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
57 58
59/* Errata i693 */
60static struct clk *utmi_p1_fck;
61static struct clk *utmi_p2_fck;
62static struct clk *xclk60mhsp1_ck;
63static struct clk *xclk60mhsp2_ck;
64static struct clk *usbhost_p1_fck;
65static struct clk *usbhost_p2_fck;
66static struct clk *init_60m_fclk;
67
58/*-------------------------------------------------------------------------*/ 68/*-------------------------------------------------------------------------*/
59 69
60static const struct hc_driver ehci_omap_hc_driver; 70static const struct hc_driver ehci_omap_hc_driver;
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
70 return __raw_readl(base + reg); 80 return __raw_readl(base + reg);
71} 81}
72 82
83/* Erratum i693 workaround sequence */
84static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
85{
86 int ret = 0;
87
88 /* Switch to the internal 60 MHz clock */
89 ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
90 if (ret != 0)
91 ehci_err(ehci, "init_60m_fclk set parent"
92 "failed error:%d\n", ret);
93
94 ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
95 if (ret != 0)
96 ehci_err(ehci, "init_60m_fclk set parent"
97 "failed error:%d\n", ret);
98
99 clk_enable(usbhost_p1_fck);
100 clk_enable(usbhost_p2_fck);
101
102 /* Wait 1ms and switch back to the external clock */
103 mdelay(1);
104 ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
105 if (ret != 0)
106 ehci_err(ehci, "xclk60mhsp1_ck set parent"
107 "failed error:%d\n", ret);
108
109 ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
110 if (ret != 0)
111 ehci_err(ehci, "xclk60mhsp2_ck set parent"
112 "failed error:%d\n", ret);
113
114 clk_disable(usbhost_p1_fck);
115 clk_disable(usbhost_p2_fck);
116}
117
73static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) 118static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
74{ 119{
75 struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); 120 struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
100 } 145 }
101} 146}
102 147
148static int omap_ehci_hub_control(
149 struct usb_hcd *hcd,
150 u16 typeReq,
151 u16 wValue,
152 u16 wIndex,
153 char *buf,
154 u16 wLength
155)
156{
157 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
158 u32 __iomem *status_reg = &ehci->regs->port_status[
159 (wIndex & 0xff) - 1];
160 u32 temp;
161 unsigned long flags;
162 int retval = 0;
163
164 spin_lock_irqsave(&ehci->lock, flags);
165
166 if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
167 temp = ehci_readl(ehci, status_reg);
168 if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
169 retval = -EPIPE;
170 goto done;
171 }
172
173 temp &= ~PORT_WKCONN_E;
174 temp |= PORT_WKDISC_E | PORT_WKOC_E;
175 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
176
177 omap_ehci_erratum_i693(ehci);
178
179 set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
180 goto done;
181 }
182
183 spin_unlock_irqrestore(&ehci->lock, flags);
184
185 /* Handle the hub control events here */
186 return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
187done:
188 spin_unlock_irqrestore(&ehci->lock, flags);
189 return retval;
190}
191
103static void disable_put_regulator( 192static void disable_put_regulator(
104 struct ehci_hcd_omap_platform_data *pdata) 193 struct ehci_hcd_omap_platform_data *pdata)
105{ 194{
@@ -264,8 +353,76 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
264 /* root ports should always stay powered */ 353 /* root ports should always stay powered */
265 ehci_port_power(omap_ehci, 1); 354 ehci_port_power(omap_ehci, 1);
266 355
356 /* get clocks */
357 utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
358 if (IS_ERR(utmi_p1_fck)) {
359 ret = PTR_ERR(utmi_p1_fck);
360 dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
361 goto err_add_hcd;
362 }
363
364 xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
365 if (IS_ERR(xclk60mhsp1_ck)) {
366 ret = PTR_ERR(xclk60mhsp1_ck);
367 dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
368 goto err_utmi_p1_fck;
369 }
370
371 utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
372 if (IS_ERR(utmi_p2_fck)) {
373 ret = PTR_ERR(utmi_p2_fck);
374 dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
375 goto err_xclk60mhsp1_ck;
376 }
377
378 xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
379 if (IS_ERR(xclk60mhsp2_ck)) {
380 ret = PTR_ERR(xclk60mhsp2_ck);
381 dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
382 goto err_utmi_p2_fck;
383 }
384
385 usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
386 if (IS_ERR(usbhost_p1_fck)) {
387 ret = PTR_ERR(usbhost_p1_fck);
388 dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
389 goto err_xclk60mhsp2_ck;
390 }
391
392 usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
393 if (IS_ERR(usbhost_p2_fck)) {
394 ret = PTR_ERR(usbhost_p2_fck);
395 dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
396 goto err_usbhost_p1_fck;
397 }
398
399 init_60m_fclk = clk_get(dev, "init_60m_fclk");
400 if (IS_ERR(init_60m_fclk)) {
401 ret = PTR_ERR(init_60m_fclk);
402 dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
403 goto err_usbhost_p2_fck;
404 }
405
267 return 0; 406 return 0;
268 407
408err_usbhost_p2_fck:
409 clk_put(usbhost_p2_fck);
410
411err_usbhost_p1_fck:
412 clk_put(usbhost_p1_fck);
413
414err_xclk60mhsp2_ck:
415 clk_put(xclk60mhsp2_ck);
416
417err_utmi_p2_fck:
418 clk_put(utmi_p2_fck);
419
420err_xclk60mhsp1_ck:
421 clk_put(xclk60mhsp1_ck);
422
423err_utmi_p1_fck:
424 clk_put(utmi_p1_fck);
425
269err_add_hcd: 426err_add_hcd:
270 disable_put_regulator(pdata); 427 disable_put_regulator(pdata);
271 pm_runtime_put_sync(dev); 428 pm_runtime_put_sync(dev);
@@ -294,6 +451,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
294 disable_put_regulator(dev->platform_data); 451 disable_put_regulator(dev->platform_data);
295 iounmap(hcd->regs); 452 iounmap(hcd->regs);
296 usb_put_hcd(hcd); 453 usb_put_hcd(hcd);
454
455 clk_put(utmi_p1_fck);
456 clk_put(utmi_p2_fck);
457 clk_put(xclk60mhsp1_ck);
458 clk_put(xclk60mhsp2_ck);
459 clk_put(usbhost_p1_fck);
460 clk_put(usbhost_p2_fck);
461 clk_put(init_60m_fclk);
462
297 pm_runtime_put_sync(dev); 463 pm_runtime_put_sync(dev);
298 pm_runtime_disable(dev); 464 pm_runtime_disable(dev);
299 465
@@ -364,7 +530,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
364 * root hub support 530 * root hub support
365 */ 531 */
366 .hub_status_data = ehci_hub_status_data, 532 .hub_status_data = ehci_hub_status_data,
367 .hub_control = ehci_hub_control, 533 .hub_control = omap_ehci_hub_control,
368 .bus_suspend = ehci_bus_suspend, 534 .bus_suspend = ehci_bus_suspend,
369 .bus_resume = ehci_bus_resume, 535 .bus_resume = ehci_bus_resume,
370 536
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bc94d7bf072d..123481793a47 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
144 hcd->has_tt = 1; 144 hcd->has_tt = 1;
145 tdi_reset(ehci); 145 tdi_reset(ehci);
146 } 146 }
147 if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
148 /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
149 if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
150 ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
151 hcd->broken_pci_sleep = 1;
152 device_set_wakeup_capable(&pdev->dev, false);
153 }
154 }
155 break; 147 break;
156 case PCI_VENDOR_ID_TDI: 148 case PCI_VENDOR_ID_TDI:
157 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 149 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index ca819cdd0c5e..e7cb3925abf8 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
126 goto fail_create_hcd; 126 goto fail_create_hcd;
127 } 127 }
128 128
129 if (pdev->dev.platform_data != NULL) 129 pdata = pdev->dev.platform_data;
130 pdata = pdev->dev.platform_data;
131 130
132 /* initialize hcd */ 131 /* initialize hcd */
133 hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, 132 hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 9c2cc4633894..e9713d589e30 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
270 * 270 *
271 * Properly shutdown the hcd, call driver's shutdown routine. 271 * Properly shutdown the hcd, call driver's shutdown routine.
272 */ 272 */
273static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op) 273static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
274{ 274{
275 struct usb_hcd *hcd = dev_get_drvdata(&op->dev); 275 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
276 276
277 if (hcd->driver->shutdown) 277 if (hcd->driver->shutdown)
278 hcd->driver->shutdown(hcd); 278 hcd->driver->shutdown(hcd);
279
280 return 0;
281} 279}
282 280
283 281
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 836772dfabd3..2f3619eefefa 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
317} 317}
318 318
319/* Carry out the final steps of resuming the controller device */ 319/* Carry out the final steps of resuming the controller device */
320static void ohci_finish_controller_resume(struct usb_hcd *hcd) 320static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd)
321{ 321{
322 struct ohci_hcd *ohci = hcd_to_ohci(hcd); 322 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
323 int port; 323 int port;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ec4338eec826..77689bd64cac 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
793 struct xhci_virt_device *virt_dev, 793 struct xhci_virt_device *virt_dev,
794 int slot_id) 794 int slot_id)
795{ 795{
796 struct list_head *tt;
797 struct list_head *tt_list_head; 796 struct list_head *tt_list_head;
798 struct list_head *tt_next; 797 struct xhci_tt_bw_info *tt_info, *next;
799 struct xhci_tt_bw_info *tt_info; 798 bool slot_found = false;
800 799
801 /* If the device never made it past the Set Address stage, 800 /* If the device never made it past the Set Address stage,
802 * it may not have the real_port set correctly. 801 * it may not have the real_port set correctly.
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
808 } 807 }
809 808
810 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); 809 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
811 if (list_empty(tt_list_head)) 810 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
812 return; 811 /* Multi-TT hubs will have more than one entry */
813 812 if (tt_info->slot_id == slot_id) {
814 list_for_each(tt, tt_list_head) { 813 slot_found = true;
815 tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); 814 list_del(&tt_info->tt_list);
816 if (tt_info->slot_id == slot_id) 815 kfree(tt_info);
816 } else if (slot_found) {
817 break; 817 break;
818 }
818 } 819 }
819 /* Cautionary measure in case the hub was disconnected before we
820 * stored the TT information.
821 */
822 if (tt_info->slot_id != slot_id)
823 return;
824
825 tt_next = tt->next;
826 tt_info = list_entry(tt, struct xhci_tt_bw_info,
827 tt_list);
828 /* Multi-TT hubs will have more than one entry */
829 do {
830 list_del(tt);
831 kfree(tt_info);
832 tt = tt_next;
833 if (list_empty(tt_list_head))
834 break;
835 tt_next = tt->next;
836 tt_info = list_entry(tt, struct xhci_tt_bw_info,
837 tt_list);
838 } while (tt_info->slot_id == slot_id);
839} 820}
840 821
841int xhci_alloc_tt_info(struct xhci_hcd *xhci, 822int xhci_alloc_tt_info(struct xhci_hcd *xhci,
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1791{ 1772{
1792 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1773 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
1793 struct dev_info *dev_info, *next; 1774 struct dev_info *dev_info, *next;
1794 struct list_head *tt_list_head;
1795 struct list_head *tt;
1796 struct list_head *endpoints;
1797 struct list_head *ep, *q;
1798 struct xhci_tt_bw_info *tt_info;
1799 struct xhci_interval_bw_table *bwt;
1800 struct xhci_virt_ep *virt_ep;
1801
1802 unsigned long flags; 1775 unsigned long flags;
1803 int size; 1776 int size;
1804 int i; 1777 int i, j, num_ports;
1805 1778
1806 /* Free the Event Ring Segment Table and the actual Event Ring */ 1779 /* Free the Event Ring Segment Table and the actual Event Ring */
1807 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1780 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1860 } 1833 }
1861 spin_unlock_irqrestore(&xhci->lock, flags); 1834 spin_unlock_irqrestore(&xhci->lock, flags);
1862 1835
1863 bwt = &xhci->rh_bw->bw_table; 1836 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1864 for (i = 0; i < XHCI_MAX_INTERVAL; i++) { 1837 for (i = 0; i < num_ports; i++) {
1865 endpoints = &bwt->interval_bw[i].endpoints; 1838 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1866 list_for_each_safe(ep, q, endpoints) { 1839 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1867 virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list); 1840 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1868 list_del(&virt_ep->bw_endpoint_list); 1841 while (!list_empty(ep))
1869 kfree(virt_ep); 1842 list_del_init(ep->next);
1870 } 1843 }
1871 } 1844 }
1872 1845
1873 tt_list_head = &xhci->rh_bw->tts; 1846 for (i = 0; i < num_ports; i++) {
1874 list_for_each_safe(tt, q, tt_list_head) { 1847 struct xhci_tt_bw_info *tt, *n;
1875 tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); 1848 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1876 list_del(tt); 1849 list_del(&tt->tt_list);
1877 kfree(tt_info); 1850 kfree(tt);
1851 }
1878 } 1852 }
1879 1853
1880 xhci->num_usb2_ports = 0; 1854 xhci->num_usb2_ports = 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index afdc73ee84a6..a979cd0dbe0f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
795 command = xhci_readl(xhci, &xhci->op_regs->command); 795 command = xhci_readl(xhci, &xhci->op_regs->command);
796 command |= CMD_CSS; 796 command |= CMD_CSS;
797 xhci_writel(xhci, command, &xhci->op_regs->command); 797 xhci_writel(xhci, command, &xhci->op_regs->command);
798 if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) { 798 if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
799 xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n"); 799 xhci_warn(xhci, "WARN: xHC save state timeout\n");
800 spin_unlock_irq(&xhci->lock); 800 spin_unlock_irq(&xhci->lock);
801 return -ETIMEDOUT; 801 return -ETIMEDOUT;
802 } 802 }
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
848 command |= CMD_CRS; 848 command |= CMD_CRS;
849 xhci_writel(xhci, command, &xhci->op_regs->command); 849 xhci_writel(xhci, command, &xhci->op_regs->command);
850 if (handshake(xhci, &xhci->op_regs->status, 850 if (handshake(xhci, &xhci->op_regs->status,
851 STS_RESTORE, 0, 10*100)) { 851 STS_RESTORE, 0, 10 * 1000)) {
852 xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n"); 852 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
853 spin_unlock_irq(&xhci->lock); 853 spin_unlock_irq(&xhci->lock);
854 return -ETIMEDOUT; 854 return -ETIMEDOUT;
855 } 855 }
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
3906 default: 3906 default:
3907 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", 3907 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
3908 __func__); 3908 __func__);
3909 return -EINVAL; 3909 return USB3_LPM_DISABLED;
3910 } 3910 }
3911 3911
3912 if (sel <= max_sel_pel && pel <= max_sel_pel) 3912 if (sel <= max_sel_pel && pel <= max_sel_pel)
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 768b4b55c816..9d63ba4d10d6 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -34,6 +34,7 @@
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35 35
36#include <mach/cputype.h> 36#include <mach/cputype.h>
37#include <mach/hardware.h>
37 38
38#include <asm/mach-types.h> 39#include <asm/mach-types.h>
39 40
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 046c84433cad..371baa0ee509 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -15,7 +15,7 @@
15 */ 15 */
16 16
17/* Integrated highspeed/otg PHY */ 17/* Integrated highspeed/otg PHY */
18#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) 18#define USBPHY_CTL_PADDR 0x01c40034
19#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ 19#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */
20#define USBPHY_PHYCLKGD BIT(8) 20#define USBPHY_PHYCLKGD BIT(8)
21#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ 21#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */
@@ -27,7 +27,7 @@
27#define USBPHY_OTGPDWN BIT(1) 27#define USBPHY_OTGPDWN BIT(1)
28#define USBPHY_PHYPDWN BIT(0) 28#define USBPHY_PHYPDWN BIT(0)
29 29
30#define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48) 30#define DM355_DEEPSLEEP_PADDR 0x01c40048
31#define DRVVBUS_FORCE BIT(2) 31#define DRVVBUS_FORCE BIT(2)
32#define DRVVBUS_OVERRIDE BIT(1) 32#define DRVVBUS_OVERRIDE BIT(1)
33 33
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f42c29b11f71..95918dacc99a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
1232 } 1232 }
1233 1233
1234 musb_ep->desc = NULL; 1234 musb_ep->desc = NULL;
1235 musb_ep->end_point.desc = NULL;
1235 1236
1236 /* abort all pending DMA and requests */ 1237 /* abort all pending DMA and requests */
1237 nuke(musb_ep, -ESHUTDOWN); 1238 nuke(musb_ep, -ESHUTDOWN);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 1b1926200ba7..73d25cd8cba5 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = {
82 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ 82 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
83 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ 83 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
84 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ 84 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
85 { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
85 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ 86 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
86 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ 87 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
87 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ 88 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c084ea34e26..bc912e5a3beb 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = {
737 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 737 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
738 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, 738 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
739 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, 739 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
740 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
740 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 741 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
741 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, 742 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
742 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 743 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f3c7c78ede33..5661c7e2d415 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -784,6 +784,7 @@
784#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ 784#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
785#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ 785#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
786#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ 786#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
787#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
787 788
788 789
789/* 790/*
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 105a6d898ca4..9b026bf7afef 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
39 39
40static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ 40static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
41 41
42/* we want to look at all devices, as the vendor/product id can change
43 * depending on the command line argument */
44static const struct usb_device_id generic_serial_ids[] = {
45 {.driver_info = 42},
46 {}
47};
48
49/* All of the device info needed for the Generic Serial Converter */ 42/* All of the device info needed for the Generic Serial Converter */
50struct usb_serial_driver usb_serial_generic_device = { 43struct usb_serial_driver usb_serial_generic_device = {
51 .driver = { 44 .driver = {
@@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug)
79 USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT; 72 USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
80 73
81 /* register our generic driver with ourselves */ 74 /* register our generic driver with ourselves */
82 retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids); 75 retval = usb_serial_register_drivers(serial_drivers,
76 "usbserial_generic", generic_device_ids);
83#endif 77#endif
84 return retval; 78 return retval;
85} 79}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index d0ec1aa52719..a71fa0aa0406 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
309 MCT_U232_SET_REQUEST_TYPE, 309 MCT_U232_SET_REQUEST_TYPE,
310 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, 310 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
311 WDR_TIMEOUT); 311 WDR_TIMEOUT);
312 if (rc < 0) 312 kfree(buf);
313 dev_err(&serial->dev->dev, 313
314 "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
315 dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); 314 dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
316 315
317 kfree(buf); 316 if (rc < 0) {
318 return rc; 317 dev_err(&serial->dev->dev,
318 "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
319 return rc;
320 }
321 return 0;
319} /* mct_u232_set_modem_ctrl */ 322} /* mct_u232_set_modem_ctrl */
320 323
321static int mct_u232_get_modem_stat(struct usb_serial *serial, 324static int mct_u232_get_modem_stat(struct usb_serial *serial,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 29160f8b5101..57eca2448424 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -190,7 +190,7 @@
190 190
191static int device_type; 191static int device_type;
192 192
193static const struct usb_device_id id_table[] __devinitconst = { 193static const struct usb_device_id id_table[] = {
194 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 194 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
195 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 195 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
196 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)}, 196 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1aae9028cd0b..e668a2460bd4 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -47,6 +47,7 @@
47/* Function prototypes */ 47/* Function prototypes */
48static int option_probe(struct usb_serial *serial, 48static int option_probe(struct usb_serial *serial,
49 const struct usb_device_id *id); 49 const struct usb_device_id *id);
50static void option_release(struct usb_serial *serial);
50static int option_send_setup(struct usb_serial_port *port); 51static int option_send_setup(struct usb_serial_port *port);
51static void option_instat_callback(struct urb *urb); 52static void option_instat_callback(struct urb *urb);
52 53
@@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb);
150#define HUAWEI_PRODUCT_E14AC 0x14AC 151#define HUAWEI_PRODUCT_E14AC 0x14AC
151#define HUAWEI_PRODUCT_K3806 0x14AE 152#define HUAWEI_PRODUCT_K3806 0x14AE
152#define HUAWEI_PRODUCT_K4605 0x14C6 153#define HUAWEI_PRODUCT_K4605 0x14C6
154#define HUAWEI_PRODUCT_K5005 0x14C8
153#define HUAWEI_PRODUCT_K3770 0x14C9 155#define HUAWEI_PRODUCT_K3770 0x14C9
154#define HUAWEI_PRODUCT_K3771 0x14CA 156#define HUAWEI_PRODUCT_K3771 0x14CA
155#define HUAWEI_PRODUCT_K4510 0x14CB 157#define HUAWEI_PRODUCT_K4510 0x14CB
@@ -425,7 +427,7 @@ static void option_instat_callback(struct urb *urb);
425#define SAMSUNG_VENDOR_ID 0x04e8 427#define SAMSUNG_VENDOR_ID 0x04e8
426#define SAMSUNG_PRODUCT_GT_B3730 0x6889 428#define SAMSUNG_PRODUCT_GT_B3730 0x6889
427 429
428/* YUGA products www.yuga-info.com*/ 430/* YUGA products www.yuga-info.com gavin.kx@qq.com */
429#define YUGA_VENDOR_ID 0x257A 431#define YUGA_VENDOR_ID 0x257A
430#define YUGA_PRODUCT_CEM600 0x1601 432#define YUGA_PRODUCT_CEM600 0x1601
431#define YUGA_PRODUCT_CEM610 0x1602 433#define YUGA_PRODUCT_CEM610 0x1602
@@ -442,6 +444,8 @@ static void option_instat_callback(struct urb *urb);
442#define YUGA_PRODUCT_CEU516 0x160C 444#define YUGA_PRODUCT_CEU516 0x160C
443#define YUGA_PRODUCT_CEU528 0x160D 445#define YUGA_PRODUCT_CEU528 0x160D
444#define YUGA_PRODUCT_CEU526 0x160F 446#define YUGA_PRODUCT_CEU526 0x160F
447#define YUGA_PRODUCT_CEU881 0x161F
448#define YUGA_PRODUCT_CEU882 0x162F
445 449
446#define YUGA_PRODUCT_CWM600 0x2601 450#define YUGA_PRODUCT_CWM600 0x2601
447#define YUGA_PRODUCT_CWM610 0x2602 451#define YUGA_PRODUCT_CWM610 0x2602
@@ -457,23 +461,26 @@ static void option_instat_callback(struct urb *urb);
457#define YUGA_PRODUCT_CWU518 0x260B 461#define YUGA_PRODUCT_CWU518 0x260B
458#define YUGA_PRODUCT_CWU516 0x260C 462#define YUGA_PRODUCT_CWU516 0x260C
459#define YUGA_PRODUCT_CWU528 0x260D 463#define YUGA_PRODUCT_CWU528 0x260D
464#define YUGA_PRODUCT_CWU581 0x260E
460#define YUGA_PRODUCT_CWU526 0x260F 465#define YUGA_PRODUCT_CWU526 0x260F
461 466#define YUGA_PRODUCT_CWU582 0x261F
462#define YUGA_PRODUCT_CLM600 0x2601 467#define YUGA_PRODUCT_CWU583 0x262F
463#define YUGA_PRODUCT_CLM610 0x2602 468
464#define YUGA_PRODUCT_CLM500 0x2603 469#define YUGA_PRODUCT_CLM600 0x3601
465#define YUGA_PRODUCT_CLM510 0x2604 470#define YUGA_PRODUCT_CLM610 0x3602
466#define YUGA_PRODUCT_CLM800 0x2605 471#define YUGA_PRODUCT_CLM500 0x3603
467#define YUGA_PRODUCT_CLM900 0x2606 472#define YUGA_PRODUCT_CLM510 0x3604
468 473#define YUGA_PRODUCT_CLM800 0x3605
469#define YUGA_PRODUCT_CLU718 0x2607 474#define YUGA_PRODUCT_CLM900 0x3606
470#define YUGA_PRODUCT_CLU716 0x2608 475
471#define YUGA_PRODUCT_CLU728 0x2609 476#define YUGA_PRODUCT_CLU718 0x3607
472#define YUGA_PRODUCT_CLU726 0x260A 477#define YUGA_PRODUCT_CLU716 0x3608
473#define YUGA_PRODUCT_CLU518 0x260B 478#define YUGA_PRODUCT_CLU728 0x3609
474#define YUGA_PRODUCT_CLU516 0x260C 479#define YUGA_PRODUCT_CLU726 0x360A
475#define YUGA_PRODUCT_CLU528 0x260D 480#define YUGA_PRODUCT_CLU518 0x360B
476#define YUGA_PRODUCT_CLU526 0x260F 481#define YUGA_PRODUCT_CLU516 0x360C
482#define YUGA_PRODUCT_CLU528 0x360D
483#define YUGA_PRODUCT_CLU526 0x360F
477 484
478/* Viettel products */ 485/* Viettel products */
479#define VIETTEL_VENDOR_ID 0x2262 486#define VIETTEL_VENDOR_ID 0x2262
@@ -666,6 +673,11 @@ static const struct usb_device_id option_ids[] = {
666 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, 673 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
667 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), 674 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
668 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 675 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
676 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
677 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
678 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
679 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
680 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
669 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, 681 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
670 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, 682 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
671 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, 683 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -1209,6 +1221,11 @@ static const struct usb_device_id option_ids[] = {
1209 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, 1221 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
1210 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, 1222 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
1211 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, 1223 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
1224 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
1225 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
1226 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
1227 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
1228 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
1212 { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, 1229 { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
1213 { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, 1230 { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
1214 { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ 1231 { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
@@ -1245,7 +1262,7 @@ static struct usb_serial_driver option_1port_device = {
1245 .ioctl = usb_wwan_ioctl, 1262 .ioctl = usb_wwan_ioctl,
1246 .attach = usb_wwan_startup, 1263 .attach = usb_wwan_startup,
1247 .disconnect = usb_wwan_disconnect, 1264 .disconnect = usb_wwan_disconnect,
1248 .release = usb_wwan_release, 1265 .release = option_release,
1249 .read_int_callback = option_instat_callback, 1266 .read_int_callback = option_instat_callback,
1250#ifdef CONFIG_PM 1267#ifdef CONFIG_PM
1251 .suspend = usb_wwan_suspend, 1268 .suspend = usb_wwan_suspend,
@@ -1259,35 +1276,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
1259 1276
1260static bool debug; 1277static bool debug;
1261 1278
1262/* per port private data */
1263
1264#define N_IN_URB 4
1265#define N_OUT_URB 4
1266#define IN_BUFLEN 4096
1267#define OUT_BUFLEN 4096
1268
1269struct option_port_private {
1270 /* Input endpoints and buffer for this port */
1271 struct urb *in_urbs[N_IN_URB];
1272 u8 *in_buffer[N_IN_URB];
1273 /* Output endpoints and buffer for this port */
1274 struct urb *out_urbs[N_OUT_URB];
1275 u8 *out_buffer[N_OUT_URB];
1276 unsigned long out_busy; /* Bit vector of URBs in use */
1277 int opened;
1278 struct usb_anchor delayed;
1279
1280 /* Settings for the port */
1281 int rts_state; /* Handshaking pins (outputs) */
1282 int dtr_state;
1283 int cts_state; /* Handshaking pins (inputs) */
1284 int dsr_state;
1285 int dcd_state;
1286 int ri_state;
1287
1288 unsigned long tx_start_time[N_OUT_URB];
1289};
1290
1291module_usb_serial_driver(serial_drivers, option_ids); 1279module_usb_serial_driver(serial_drivers, option_ids);
1292 1280
1293static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason, 1281static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
@@ -1356,12 +1344,22 @@ static int option_probe(struct usb_serial *serial,
1356 return 0; 1344 return 0;
1357} 1345}
1358 1346
1347static void option_release(struct usb_serial *serial)
1348{
1349 struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
1350
1351 usb_wwan_release(serial);
1352
1353 kfree(priv);
1354}
1355
1359static void option_instat_callback(struct urb *urb) 1356static void option_instat_callback(struct urb *urb)
1360{ 1357{
1361 int err; 1358 int err;
1362 int status = urb->status; 1359 int status = urb->status;
1363 struct usb_serial_port *port = urb->context; 1360 struct usb_serial_port *port = urb->context;
1364 struct option_port_private *portdata = usb_get_serial_port_data(port); 1361 struct usb_wwan_port_private *portdata =
1362 usb_get_serial_port_data(port);
1365 1363
1366 dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); 1364 dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
1367 1365
@@ -1421,7 +1419,7 @@ static int option_send_setup(struct usb_serial_port *port)
1421 struct usb_serial *serial = port->serial; 1419 struct usb_serial *serial = port->serial;
1422 struct usb_wwan_intf_private *intfdata = 1420 struct usb_wwan_intf_private *intfdata =
1423 (struct usb_wwan_intf_private *) serial->private; 1421 (struct usb_wwan_intf_private *) serial->private;
1424 struct option_port_private *portdata; 1422 struct usb_wwan_port_private *portdata;
1425 int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; 1423 int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
1426 int val = 0; 1424 int val = 0;
1427 1425
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 0d5fe59ebb9e..996015c5f1ac 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = {
105 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ 105 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
106 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ 106 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
107 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ 107 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
108 {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */
109 {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */
108 {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ 110 {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
111 {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */
112 {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
113 {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */
114 {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
109 {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ 115 {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
110 {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ 116 {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
111 { } /* Terminating entry */ 117 { } /* Terminating entry */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ba54a0a8235c..d423d36acc04 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = {
294 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 294 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
295 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 295 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
296 }, 296 },
297 /* AT&T Direct IP LTE modems */
298 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
299 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
300 },
297 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ 301 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
298 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 302 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
299 }, 303 },
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6a1b609a0d94..27483f91a4a3 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -659,12 +659,14 @@ exit:
659static struct usb_serial_driver *search_serial_device( 659static struct usb_serial_driver *search_serial_device(
660 struct usb_interface *iface) 660 struct usb_interface *iface)
661{ 661{
662 const struct usb_device_id *id; 662 const struct usb_device_id *id = NULL;
663 struct usb_serial_driver *drv; 663 struct usb_serial_driver *drv;
664 struct usb_driver *driver = to_usb_driver(iface->dev.driver);
664 665
665 /* Check if the usb id matches a known device */ 666 /* Check if the usb id matches a known device */
666 list_for_each_entry(drv, &usb_serial_driver_list, driver_list) { 667 list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
667 id = get_iface_id(drv, iface); 668 if (drv->usb_driver == driver)
669 id = get_iface_id(drv, iface);
668 if (id) 670 if (id)
669 return drv; 671 return drv;
670 } 672 }
@@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface,
755 757
756 if (retval) { 758 if (retval) {
757 dbg("sub driver rejected device"); 759 dbg("sub driver rejected device");
758 kfree(serial); 760 usb_serial_put(serial);
759 module_put(type->driver.owner); 761 module_put(type->driver.owner);
760 return retval; 762 return retval;
761 } 763 }
@@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface,
827 */ 829 */
828 if (num_bulk_in == 0 || num_bulk_out == 0) { 830 if (num_bulk_in == 0 || num_bulk_out == 0) {
829 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); 831 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
830 kfree(serial); 832 usb_serial_put(serial);
831 module_put(type->driver.owner); 833 module_put(type->driver.owner);
832 return -ENODEV; 834 return -ENODEV;
833 } 835 }
@@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface,
841 if (num_ports == 0) { 843 if (num_ports == 0) {
842 dev_err(&interface->dev, 844 dev_err(&interface->dev,
843 "Generic device with no bulk out, not allowed.\n"); 845 "Generic device with no bulk out, not allowed.\n");
844 kfree(serial); 846 usb_serial_put(serial);
845 module_put(type->driver.owner); 847 module_put(type->driver.owner);
846 return -EIO; 848 return -EIO;
847 } 849 }
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1719886bb9be..caf22bf5f822 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1107,6 +1107,13 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
1107 USB_SC_RBC, USB_PR_BULK, NULL, 1107 USB_SC_RBC, USB_PR_BULK, NULL,
1108 0 ), 1108 0 ),
1109 1109
1110/* Feiya QDI U2 DISK, reported by Hans de Goede <hdegoede@redhat.com> */
1111UNUSUAL_DEV( 0x090c, 0x1000, 0x0000, 0xffff,
1112 "Feiya",
1113 "QDI U2 DISK",
1114 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1115 US_FL_NO_READ_CAPACITY_16 ),
1116
1110/* aeb */ 1117/* aeb */
1111UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, 1118UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
1112 "Feiya", 1119 "Feiya",
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index a290be51a1f4..0217f7415ef5 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2210,7 +2210,7 @@ config FB_XILINX
2210 2210
2211config FB_COBALT 2211config FB_COBALT
2212 tristate "Cobalt server LCD frame buffer support" 2212 tristate "Cobalt server LCD frame buffer support"
2213 depends on FB && MIPS_COBALT 2213 depends on FB && (MIPS_COBALT || MIPS_SEAD3)
2214 2214
2215config FB_SH7760 2215config FB_SH7760
2216 bool "SH7760/SH7763/SH7720/SH7721 LCDC support" 2216 bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2382,6 +2382,39 @@ config FB_BROADSHEET
2382 and could also have been called by other names when coupled with 2382 and could also have been called by other names when coupled with
2383 a bridge adapter. 2383 a bridge adapter.
2384 2384
2385config FB_AUO_K190X
2386 tristate "AUO-K190X EPD controller support"
2387 depends on FB
2388 select FB_SYS_FILLRECT
2389 select FB_SYS_COPYAREA
2390 select FB_SYS_IMAGEBLIT
2391 select FB_SYS_FOPS
2392 select FB_DEFERRED_IO
2393 help
2394 Provides support for epaper controllers from the K190X series
2395 of AUO. These controllers can be used to drive epaper displays
2396 from Sipix.
2397
2398 This option enables the common support, shared by the individual
2399 controller drivers. You will also have to enable the driver
2400 for the controller type used in your device.
2401
2402config FB_AUO_K1900
2403 tristate "AUO-K1900 EPD controller support"
2404 depends on FB && FB_AUO_K190X
2405 help
2406 This driver implements support for the AUO K1900 epd-controller.
2407 This controller can drive Sipix epaper displays but can only do
2408 serial updates, reducing the number of possible frames per second.
2409
2410config FB_AUO_K1901
2411 tristate "AUO-K1901 EPD controller support"
2412 depends on FB && FB_AUO_K190X
2413 help
2414 This driver implements support for the AUO K1901 epd-controller.
2415 This controller can drive Sipix epaper displays and supports
2416 concurrent updates, making higher frames per second possible.
2417
2385config FB_JZ4740 2418config FB_JZ4740
2386 tristate "JZ4740 LCD framebuffer support" 2419 tristate "JZ4740 LCD framebuffer support"
2387 depends on FB && MACH_JZ4740 2420 depends on FB && MACH_JZ4740
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9356add945b3..ee8dafb69e36 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -118,6 +118,9 @@ obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
118obj-$(CONFIG_FB_MAXINE) += maxinefb.o 118obj-$(CONFIG_FB_MAXINE) += maxinefb.o
119obj-$(CONFIG_FB_METRONOME) += metronomefb.o 119obj-$(CONFIG_FB_METRONOME) += metronomefb.o
120obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o 120obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o
121obj-$(CONFIG_FB_AUO_K190X) += auo_k190x.o
122obj-$(CONFIG_FB_AUO_K1900) += auo_k1900fb.o
123obj-$(CONFIG_FB_AUO_K1901) += auo_k1901fb.o
121obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o 124obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
122obj-$(CONFIG_FB_SH7760) += sh7760fb.o 125obj-$(CONFIG_FB_SH7760) += sh7760fb.o
123obj-$(CONFIG_FB_IMX) += imxfb.o 126obj-$(CONFIG_FB_IMX) += imxfb.o
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c
new file mode 100644
index 000000000000..c36cf961dcb2
--- /dev/null
+++ b/drivers/video/auo_k1900fb.c
@@ -0,0 +1,198 @@
1/*
2 * auok190xfb.c -- FB driver for AUO-K1900 controllers
3 *
4 * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
5 *
6 * based on broadsheetfb.c
7 *
8 * Copyright (C) 2008, Jaya Kumar
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
15 *
16 * This driver is written to be used with the AUO-K1900 display controller.
17 *
18 * It is intended to be architecture independent. A board specific driver
19 * must be used to perform all the physical IO interactions.
20 *
21 * The controller supports different update modes:
22 * mode0+1 16 step gray (4bit)
23 * mode2 4 step gray (2bit) - FIXME: add strange refresh
24 * mode3 2 step gray (1bit) - FIXME: add strange refresh
25 * mode4 handwriting mode (strange behaviour)
26 * mode5 automatic selection of update mode
27 */
28
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/mm.h>
34#include <linux/slab.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/fb.h>
38#include <linux/init.h>
39#include <linux/platform_device.h>
40#include <linux/list.h>
41#include <linux/firmware.h>
42#include <linux/gpio.h>
43#include <linux/pm_runtime.h>
44
45#include <video/auo_k190xfb.h>
46
47#include "auo_k190x.h"
48
49/*
50 * AUO-K1900 specific commands
51 */
52
53#define AUOK1900_CMD_PARTIALDISP 0x1001
54#define AUOK1900_CMD_ROTATION 0x1006
55#define AUOK1900_CMD_LUT_STOP 0x1009
56
57#define AUOK1900_INIT_TEMP_AVERAGE (1 << 13)
58#define AUOK1900_INIT_ROTATE(_x) ((_x & 0x3) << 10)
59#define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2)
60
61static void auok1900_init(struct auok190xfb_par *par)
62{
63 struct auok190x_board *board = par->board;
64 u16 init_param = 0;
65
66 init_param |= AUOK1900_INIT_TEMP_AVERAGE;
67 init_param |= AUOK1900_INIT_ROTATE(par->rotation);
68 init_param |= AUOK190X_INIT_INVERSE_WHITE;
69 init_param |= AUOK190X_INIT_FORMAT0;
70 init_param |= AUOK1900_INIT_RESOLUTION(par->resolution);
71 init_param |= AUOK190X_INIT_SHIFT_RIGHT;
72
73 auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
74
75 /* let the controller finish */
76 board->wait_for_rdy(par);
77}
78
79static void auok1900_update_region(struct auok190xfb_par *par, int mode,
80 u16 y1, u16 y2)
81{
82 struct device *dev = par->info->device;
83 unsigned char *buf = (unsigned char *)par->info->screen_base;
84 int xres = par->info->var.xres;
85 u16 args[4];
86
87 pm_runtime_get_sync(dev);
88
89 mutex_lock(&(par->io_lock));
90
91 /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
92 y1 &= 0xfffe;
93 y2 &= 0xfffe;
94
95 dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
96 1, y1+1, xres, y2-y1, mode);
97
98 /* to FIX handle different partial update modes */
99 args[0] = mode | 1;
100 args[1] = y1 + 1;
101 args[2] = xres;
102 args[3] = y2 - y1;
103 buf += y1 * xres;
104 auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args,
105 ((y2 - y1) * xres)/2, (u16 *) buf);
106 auok190x_send_command(par, AUOK190X_CMD_DATA_STOP);
107
108 par->update_cnt++;
109
110 mutex_unlock(&(par->io_lock));
111
112 pm_runtime_mark_last_busy(dev);
113 pm_runtime_put_autosuspend(dev);
114}
115
116static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par,
117 u16 y1, u16 y2)
118{
119 int mode;
120
121 if (par->update_mode < 0) {
122 mode = AUOK190X_UPDATE_MODE(1);
123 par->last_mode = -1;
124 } else {
125 mode = AUOK190X_UPDATE_MODE(par->update_mode);
126 par->last_mode = par->update_mode;
127 }
128
129 if (par->flash)
130 mode |= AUOK190X_UPDATE_NONFLASH;
131
132 auok1900_update_region(par, mode, y1, y2);
133}
134
135static void auok1900fb_dpy_update(struct auok190xfb_par *par)
136{
137 int mode;
138
139 if (par->update_mode < 0) {
140 mode = AUOK190X_UPDATE_MODE(0);
141 par->last_mode = -1;
142 } else {
143 mode = AUOK190X_UPDATE_MODE(par->update_mode);
144 par->last_mode = par->update_mode;
145 }
146
147 if (par->flash)
148 mode |= AUOK190X_UPDATE_NONFLASH;
149
150 auok1900_update_region(par, mode, 0, par->info->var.yres);
151 par->update_cnt = 0;
152}
153
154static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
155{
156 return (par->update_cnt > 10);
157}
158
159static int __devinit auok1900fb_probe(struct platform_device *pdev)
160{
161 struct auok190x_init_data init;
162 struct auok190x_board *board;
163
164 /* pick up board specific routines */
165 board = pdev->dev.platform_data;
166 if (!board)
167 return -EINVAL;
168
169 /* fill temporary init struct for common init */
170 init.id = "auo_k1900fb";
171 init.board = board;
172 init.update_partial = auok1900fb_dpy_update_pages;
173 init.update_all = auok1900fb_dpy_update;
174 init.need_refresh = auok1900fb_need_refresh;
175 init.init = auok1900_init;
176
177 return auok190x_common_probe(pdev, &init);
178}
179
180static int __devexit auok1900fb_remove(struct platform_device *pdev)
181{
182 return auok190x_common_remove(pdev);
183}
184
185static struct platform_driver auok1900fb_driver = {
186 .probe = auok1900fb_probe,
187 .remove = __devexit_p(auok1900fb_remove),
188 .driver = {
189 .owner = THIS_MODULE,
190 .name = "auo_k1900fb",
191 .pm = &auok190x_pm,
192 },
193};
194module_platform_driver(auok1900fb_driver);
195
196MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller");
197MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
198MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c
new file mode 100644
index 000000000000..1c054c18616e
--- /dev/null
+++ b/drivers/video/auo_k1901fb.c
@@ -0,0 +1,251 @@
1/*
2 * auok190xfb.c -- FB driver for AUO-K1901 controllers
3 *
4 * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
5 *
6 * based on broadsheetfb.c
7 *
8 * Copyright (C) 2008, Jaya Kumar
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
15 *
16 * This driver is written to be used with the AUO-K1901 display controller.
17 *
18 * It is intended to be architecture independent. A board specific driver
19 * must be used to perform all the physical IO interactions.
20 *
21 * The controller supports different update modes:
22 * mode0+1 16 step gray (4bit)
23 * mode2+3 4 step gray (2bit)
24 * mode4+5 2 step gray (1bit)
25 * - mode4 is described as "without LUT"
26 * mode7 automatic selection of update mode
27 *
28 * The most interesting difference to the K1900 is the ability to do screen
29 * updates in an asynchronous fashion. Where the K1900 needs to wait for the
30 * current update to complete, the K1901 can process later updates already.
31 */
32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/errno.h>
36#include <linux/string.h>
37#include <linux/mm.h>
38#include <linux/slab.h>
39#include <linux/delay.h>
40#include <linux/interrupt.h>
41#include <linux/fb.h>
42#include <linux/init.h>
43#include <linux/platform_device.h>
44#include <linux/list.h>
45#include <linux/firmware.h>
46#include <linux/gpio.h>
47#include <linux/pm_runtime.h>
48
49#include <video/auo_k190xfb.h>
50
51#include "auo_k190x.h"
52
53/*
54 * AUO-K1901 specific commands
55 */
56
57#define AUOK1901_CMD_LUT_INTERFACE 0x0005
58#define AUOK1901_CMD_DMA_START 0x1001
59#define AUOK1901_CMD_CURSOR_START 0x1007
60#define AUOK1901_CMD_CURSOR_STOP AUOK190X_CMD_DATA_STOP
61#define AUOK1901_CMD_DDMA_START 0x1009
62
63#define AUOK1901_INIT_GATE_PULSE_LOW (0 << 14)
64#define AUOK1901_INIT_GATE_PULSE_HIGH (1 << 14)
65#define AUOK1901_INIT_SINGLE_GATE (0 << 13)
66#define AUOK1901_INIT_DOUBLE_GATE (1 << 13)
67
68/* Bits to pixels
69 * Mode 15-12 11-8 7-4 3-0
70 * format2 2 T 1 T
71 * format3 1 T 2 T
72 * format4 T 2 T 1
73 * format5 T 1 T 2
74 *
75 * halftone modes:
76 * format6 2 2 1 1
77 * format7 1 1 2 2
78 */
79#define AUOK1901_INIT_FORMAT2 (1 << 7)
80#define AUOK1901_INIT_FORMAT3 ((1 << 7) | (1 << 6))
81#define AUOK1901_INIT_FORMAT4 (1 << 8)
82#define AUOK1901_INIT_FORMAT5 ((1 << 8) | (1 << 6))
83#define AUOK1901_INIT_FORMAT6 ((1 << 8) | (1 << 7))
84#define AUOK1901_INIT_FORMAT7 ((1 << 8) | (1 << 7) | (1 << 6))
85
86/* res[4] to bit 10
87 * res[3-0] to bits 5-2
88 */
89#define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \
90 | ((_res & 0xf) << 2))
91
92/*
93 * portrait / landscape orientation in AUOK1901_CMD_DMA_START
94 */
95#define AUOK1901_DMA_ROTATE90(_rot) ((_rot & 1) << 13)
96
97/*
98 * equivalent to 1 << 11, needs the ~ to have same rotation like K1900
99 */
100#define AUOK1901_DDMA_ROTATE180(_rot) ((~_rot & 2) << 10)
101
102static void auok1901_init(struct auok190xfb_par *par)
103{
104 struct auok190x_board *board = par->board;
105 u16 init_param = 0;
106
107 init_param |= AUOK190X_INIT_INVERSE_WHITE;
108 init_param |= AUOK190X_INIT_FORMAT0;
109 init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
110 init_param |= AUOK190X_INIT_SHIFT_LEFT;
111
112 auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
113
114 /* let the controller finish */
115 board->wait_for_rdy(par);
116}
117
118static void auok1901_update_region(struct auok190xfb_par *par, int mode,
119 u16 y1, u16 y2)
120{
121 struct device *dev = par->info->device;
122 unsigned char *buf = (unsigned char *)par->info->screen_base;
123 int xres = par->info->var.xres;
124 u16 args[5];
125
126 pm_runtime_get_sync(dev);
127
128 mutex_lock(&(par->io_lock));
129
130 /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
131 y1 &= 0xfffe;
132 y2 &= 0xfffe;
133
134 dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
135 1, y1+1, xres, y2-y1, mode);
136
137 /* K1901: first transfer the region data */
138 args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
139 args[1] = y1 + 1;
140 args[2] = xres;
141 args[3] = y2 - y1;
142 buf += y1 * xres;
143 auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
144 args, ((y2 - y1) * xres)/2,
145 (u16 *) buf);
146 auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);
147
148 /* K1901: second tell the controller to update the region with mode */
149 args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
150 args[1] = 1;
151 args[2] = y1 + 1;
152 args[3] = xres;
153 args[4] = y2 - y1;
154 auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);
155
156 par->update_cnt++;
157
158 mutex_unlock(&(par->io_lock));
159
160 pm_runtime_mark_last_busy(dev);
161 pm_runtime_put_autosuspend(dev);
162}
163
164static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
165 u16 y1, u16 y2)
166{
167 int mode;
168
169 if (par->update_mode < 0) {
170 mode = AUOK190X_UPDATE_MODE(1);
171 par->last_mode = -1;
172 } else {
173 mode = AUOK190X_UPDATE_MODE(par->update_mode);
174 par->last_mode = par->update_mode;
175 }
176
177 if (par->flash)
178 mode |= AUOK190X_UPDATE_NONFLASH;
179
180 auok1901_update_region(par, mode, y1, y2);
181}
182
183static void auok1901fb_dpy_update(struct auok190xfb_par *par)
184{
185 int mode;
186
187 /* When doing full updates, wait for the controller to be ready
188 * This will hopefully catch some hangs of the K1901
189 */
190 par->board->wait_for_rdy(par);
191
192 if (par->update_mode < 0) {
193 mode = AUOK190X_UPDATE_MODE(0);
194 par->last_mode = -1;
195 } else {
196 mode = AUOK190X_UPDATE_MODE(par->update_mode);
197 par->last_mode = par->update_mode;
198 }
199
200 if (par->flash)
201 mode |= AUOK190X_UPDATE_NONFLASH;
202
203 auok1901_update_region(par, mode, 0, par->info->var.yres);
204 par->update_cnt = 0;
205}
206
207static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
208{
209 return (par->update_cnt > 10);
210}
211
212static int __devinit auok1901fb_probe(struct platform_device *pdev)
213{
214 struct auok190x_init_data init;
215 struct auok190x_board *board;
216
217 /* pick up board specific routines */
218 board = pdev->dev.platform_data;
219 if (!board)
220 return -EINVAL;
221
222 /* fill temporary init struct for common init */
223 init.id = "auo_k1901fb";
224 init.board = board;
225 init.update_partial = auok1901fb_dpy_update_pages;
226 init.update_all = auok1901fb_dpy_update;
227 init.need_refresh = auok1901fb_need_refresh;
228 init.init = auok1901_init;
229
230 return auok190x_common_probe(pdev, &init);
231}
232
233static int __devexit auok1901fb_remove(struct platform_device *pdev)
234{
235 return auok190x_common_remove(pdev);
236}
237
238static struct platform_driver auok1901fb_driver = {
239 .probe = auok1901fb_probe,
240 .remove = __devexit_p(auok1901fb_remove),
241 .driver = {
242 .owner = THIS_MODULE,
243 .name = "auo_k1901fb",
244 .pm = &auok190x_pm,
245 },
246};
247module_platform_driver(auok1901fb_driver);
248
249MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
250MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
251MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
new file mode 100644
index 000000000000..77da6a2f43dc
--- /dev/null
+++ b/drivers/video/auo_k190x.c
@@ -0,0 +1,1046 @@
1/*
2 * Common code for AUO-K190X framebuffer drivers
3 *
4 * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/gpio.h>
14#include <linux/pm_runtime.h>
15#include <linux/fb.h>
16#include <linux/delay.h>
17#include <linux/uaccess.h>
18#include <linux/vmalloc.h>
19#include <linux/regulator/consumer.h>
20
21#include <video/auo_k190xfb.h>
22
23#include "auo_k190x.h"
24
/* Physical panel dimensions in pixels. */
struct panel_info {
	int w;	/* width */
	int h;	/* height */
};

/* table of panel specific parameters to be indexed into by the board drivers */
static struct panel_info panel_table[] = {
	/* standard 6" */
	[AUOK190X_RESOLUTION_800_600] = {
		.w = 800,
		.h = 600,
	},
	/* standard 9" */
	[AUOK190X_RESOLUTION_1024_768] = {
		.w = 1024,
		.h = 768,
	},
};
43
44/*
45 * private I80 interface to the board driver
46 */
47
48static void auok190x_issue_data(struct auok190xfb_par *par, u16 data)
49{
50 par->board->set_ctl(par, AUOK190X_I80_WR, 0);
51 par->board->set_hdb(par, data);
52 par->board->set_ctl(par, AUOK190X_I80_WR, 1);
53}
54
55static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data)
56{
57 par->board->set_ctl(par, AUOK190X_I80_DC, 0);
58 auok190x_issue_data(par, data);
59 par->board->set_ctl(par, AUOK190X_I80_DC, 1);
60}
61
/* Pack 8bit grayscale pixels down to 4bit and clock them out.
 *
 * @size: transfer length; must be a multiple of 4. Each loop iteration
 *        consumes two 16bit input words (four 8bit pixels) and issues
 *        one packed 16bit word, so size/2 words are written to the bus.
 *        NOTE(review): whether @size counts 16bit words or pixels is not
 *        visible here - confirm against the callers.
 *
 * Returns 0 on success or -EINVAL for an unaligned size.
 */
static int auok190x_issue_pixels(struct auok190xfb_par *par, int size,
				 u16 *data)
{
	struct device *dev = par->info->device;
	int i;
	u16 tmp;

	if (size & 3) {
		dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n",
			size);
		return -EINVAL;
	}

	for (i = 0; i < (size >> 1); i++) {
		par->board->set_ctl(par, AUOK190X_I80_WR, 0);

		/* simple reduction of 8bit staticgray to 4bit gray
		 * combines 4 * 4bit pixel values into a 16bit value
		 */
		tmp = (data[2*i] & 0xF0) >> 4;
		tmp |= (data[2*i] & 0xF000) >> 8;
		tmp |= (data[2*i+1] & 0xF0) << 4;
		tmp |= (data[2*i+1] & 0xF000);

		par->board->set_hdb(par, tmp);
		par->board->set_ctl(par, AUOK190X_I80_WR, 1);
	}

	return 0;
}
92
93static u16 auok190x_read_data(struct auok190xfb_par *par)
94{
95 u16 data;
96
97 par->board->set_ctl(par, AUOK190X_I80_OE, 0);
98 data = par->board->get_hdb(par);
99 par->board->set_ctl(par, AUOK190X_I80_OE, 1);
100
101 return data;
102}
103
104/*
105 * Command interface for the controller drivers
106 */
107
/* Send a bare command without waiting for the controller to be ready;
 * chip-select frames the transfer.
 */
void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data)
{
	par->board->set_ctl(par, AUOK190X_I80_CS, 0);
	auok190x_issue_cmd(par, data);
	par->board->set_ctl(par, AUOK190X_I80_CS, 1);
}
EXPORT_SYMBOL_GPL(auok190x_send_command_nowait);
115
116void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
117 int argc, u16 *argv)
118{
119 int i;
120
121 par->board->set_ctl(par, AUOK190X_I80_CS, 0);
122 auok190x_issue_cmd(par, cmd);
123
124 for (i = 0; i < argc; i++)
125 auok190x_issue_data(par, argv[i]);
126 par->board->set_ctl(par, AUOK190X_I80_CS, 1);
127}
128EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait);
129
130int auok190x_send_command(struct auok190xfb_par *par, u16 data)
131{
132 int ret;
133
134 ret = par->board->wait_for_rdy(par);
135 if (ret)
136 return ret;
137
138 auok190x_send_command_nowait(par, data);
139 return 0;
140}
141EXPORT_SYMBOL_GPL(auok190x_send_command);
142
143int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
144 int argc, u16 *argv)
145{
146 int ret;
147
148 ret = par->board->wait_for_rdy(par);
149 if (ret)
150 return ret;
151
152 auok190x_send_cmdargs_nowait(par, cmd, argc, argv);
153 return 0;
154}
155EXPORT_SYMBOL_GPL(auok190x_send_cmdargs);
156
157int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
158 int argc, u16 *argv)
159{
160 int i, ret;
161
162 ret = par->board->wait_for_rdy(par);
163 if (ret)
164 return ret;
165
166 par->board->set_ctl(par, AUOK190X_I80_CS, 0);
167 auok190x_issue_cmd(par, cmd);
168
169 for (i = 0; i < argc; i++)
170 argv[i] = auok190x_read_data(par);
171 par->board->set_ctl(par, AUOK190X_I80_CS, 1);
172
173 return 0;
174}
175EXPORT_SYMBOL_GPL(auok190x_read_cmdargs);
176
177void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd,
178 int argc, u16 *argv, int size, u16 *data)
179{
180 int i;
181
182 par->board->set_ctl(par, AUOK190X_I80_CS, 0);
183
184 auok190x_issue_cmd(par, cmd);
185
186 for (i = 0; i < argc; i++)
187 auok190x_issue_data(par, argv[i]);
188
189 auok190x_issue_pixels(par, size, data);
190
191 par->board->set_ctl(par, AUOK190X_I80_CS, 1);
192}
193EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait);
194
195int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
196 int argc, u16 *argv, int size, u16 *data)
197{
198 int ret;
199
200 ret = par->board->wait_for_rdy(par);
201 if (ret)
202 return ret;
203
204 auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data);
205
206 return 0;
207}
208EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels);
209
210/*
211 * fbdefio callbacks - common on both controllers.
212 */
213
/* fbdefio first_io hook: kick off an asynchronous runtime-resume as soon
 * as the first page fault hits, so the controller is (hopefully) awake by
 * the time the deferred-io worker runs.
 */
static void auok190xfb_dpy_first_io(struct fb_info *info)
{
	/* tell runtime-pm that we wish to use the device in a short time */
	pm_runtime_get(info->device);
}
219
/* this is called back from the deferred io workqueue */
/* Coalesce the dirtied pages into as few partial display updates as
 * possible, or fall back to a full refresh when the refresh heuristic
 * demands it. Pairs the runtime-pm reference taken in first_io (or takes
 * one itself when first_io never ran).
 */
static void auok190xfb_dpy_deferred_io(struct fb_info *info,
				struct list_head *pagelist)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct auok190xfb_par *par = info->par;
	u16 yres = info->var.yres;
	u16 xres = info->var.xres;
	u16 y1 = 0, h = 0;		/* current update region (rows) */
	int prev_index = -1;		/* page index of previous iteration */
	struct page *cur;
	int h_inc;			/* display rows covered by one page */
	int threshold;			/* max page gap still merged */

	if (!list_empty(pagelist))
		/* the device resume should've been requested through first_io,
		 * if the resume did not finish until now, wait for it.
		 */
		pm_runtime_barrier(info->device);
	else
		/* We reached this via the fsync or some other way.
		 * In either case the first_io function did not run,
		 * so we runtime_resume the device here synchronously.
		 */
		pm_runtime_get_sync(info->device);

	/* Do a full screen update every n updates to prevent
	 * excessive darkening of the Sipix display.
	 * If we do this, there is no need to walk the pages.
	 */
	if (par->need_refresh(par)) {
		par->update_all(par);
		goto out;
	}

	/* height increment is fixed per page */
	h_inc = DIV_ROUND_UP(PAGE_SIZE , xres);

	/* calculate number of pages from pixel height */
	threshold = par->consecutive_threshold / h_inc;
	if (threshold < 1)
		threshold = 1;

	/* walk the written page list and swizzle the data */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		if (prev_index < 0) {
			/* just starting so assign first page */
			y1 = (cur->index << PAGE_SHIFT) / xres;
			h = h_inc;
		} else if ((cur->index - prev_index) <= threshold) {
			/* page is within our threshold for single updates */
			h += h_inc * (cur->index - prev_index);
		} else {
			/* page not consecutive, issue previous update first */
			par->update_partial(par, y1, y1 + h);

			/* start over with our non consecutive page */
			y1 = (cur->index << PAGE_SHIFT) / xres;
			h = h_inc;
		}
		prev_index = cur->index;
	}

	/* if we still have any pages to update we do so now */
	if (h >= yres)
		/* its a full screen update, just do it */
		par->update_all(par);
	else
		par->update_partial(par, y1, min((u16) (y1 + h), yres));

out:
	pm_runtime_mark_last_busy(info->device);
	pm_runtime_put_autosuspend(info->device);
}
294
295/*
296 * framebuffer operations
297 */
298
/*
 * this is the slow path from userspace. they can seek and write to
 * the fb. it's inefficient to do anything less than a full screen draw
 */
/* Copies user data into the vmalloc'ed shadow buffer and triggers a full
 * display update. Returns the number of bytes written, or a negative
 * error. Note the deliberate error precedence: -EFBIG for an oversized
 * request, then -ENOSPC for one that runs past the end of the buffer.
 */
static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct auok190xfb_par *par = info->par;
	unsigned long p = *ppos;
	void *dst;
	int err = 0;
	unsigned long total_size;

	if (info->state != FBINFO_STATE_RUNNING)
		return -EPERM;

	total_size = info->fix.smem_len;

	if (p > total_size)
		return -EFBIG;

	if (count > total_size) {
		err = -EFBIG;
		count = total_size;
	}

	if (count + p > total_size) {
		if (!err)
			err = -ENOSPC;

		count = total_size - p;
	}

	dst = (void *)(info->screen_base + p);

	if (copy_from_user(dst, buf, count))
		err = -EFAULT;

	if (!err)
		*ppos += count;

	/* push whatever made it into the shadow buffer to the display */
	par->update_all(par);

	return (err) ? err : count;
}
344
345static void auok190xfb_fillrect(struct fb_info *info,
346 const struct fb_fillrect *rect)
347{
348 struct auok190xfb_par *par = info->par;
349
350 sys_fillrect(info, rect);
351
352 par->update_all(par);
353}
354
355static void auok190xfb_copyarea(struct fb_info *info,
356 const struct fb_copyarea *area)
357{
358 struct auok190xfb_par *par = info->par;
359
360 sys_copyarea(info, area);
361
362 par->update_all(par);
363}
364
365static void auok190xfb_imageblit(struct fb_info *info,
366 const struct fb_image *image)
367{
368 struct auok190xfb_par *par = info->par;
369
370 sys_imageblit(info, image);
371
372 par->update_all(par);
373}
374
375static int auok190xfb_check_var(struct fb_var_screeninfo *var,
376 struct fb_info *info)
377{
378 if (info->var.xres != var->xres || info->var.yres != var->yres ||
379 info->var.xres_virtual != var->xres_virtual ||
380 info->var.yres_virtual != var->yres_virtual) {
381 pr_info("%s: Resolution not supported: X%u x Y%u\n",
382 __func__, var->xres, var->yres);
383 return -EINVAL;
384 }
385
386 /*
387 * Memory limit
388 */
389
390 if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
391 pr_info("%s: Memory Limit requested yres_virtual = %u\n",
392 __func__, var->yres_virtual);
393 return -ENOMEM;
394 }
395
396 return 0;
397}
398
/* Framebuffer ops: all drawing goes through the sys_* helpers into the
 * vmalloc'ed shadow buffer, with an explicit EPD refresh afterwards.
 */
static struct fb_ops auok190xfb_ops = {
	.owner		= THIS_MODULE,
	.fb_read	= fb_sys_read,
	.fb_write	= auok190xfb_write,
	.fb_fillrect	= auok190xfb_fillrect,
	.fb_copyarea	= auok190xfb_copyarea,
	.fb_imageblit	= auok190xfb_imageblit,
	.fb_check_var	= auok190xfb_check_var,
};
408
409/*
410 * Controller-functions common to both K1900 and K1901
411 */
412
/* Read the controller temperature via the READ_VERSION command.
 *
 * The raw value in data[0] is masked and shifted right by one, dropping
 * the half-degree bit. Values of 201 and above encode negative
 * temperatures in two's-complement style (256 - temp), so e.g. 255
 * decodes to -1.
 *
 * Returns the temperature in whole degrees (may be negative).
 */
static int auok190x_read_temperature(struct auok190xfb_par *par)
{
	struct device *dev = par->info->device;
	u16 data[4];
	int temp;

	pm_runtime_get_sync(dev);

	mutex_lock(&(par->io_lock));

	auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);

	mutex_unlock(&(par->io_lock));

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	/* sanitize and split of half-degrees for now */
	temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1);

	/* handle positive and negative temperatures */
	if (temp >= 201)
		return (255 - temp + 1) * (-1);
	else
		return temp;
}
439
440static void auok190x_identify(struct auok190xfb_par *par)
441{
442 struct device *dev = par->info->device;
443 u16 data[4];
444
445 pm_runtime_get_sync(dev);
446
447 mutex_lock(&(par->io_lock));
448
449 auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
450
451 mutex_unlock(&(par->io_lock));
452
453 par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK;
454
455 par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]);
456 par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]);
457 par->panel_model = AUOK190X_VERSION_MODEL(data[2]);
458
459 par->tcon_version = AUOK190X_VERSION_TCON(data[3]);
460 par->lut_version = AUOK190X_VERSION_LUT(data[3]);
461
462 dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x",
463 par->panel_size_int, par->panel_size_float, par->panel_model,
464 par->epd_type, par->tcon_version, par->lut_version);
465
466 pm_runtime_mark_last_busy(dev);
467 pm_runtime_put_autosuspend(dev);
468}
469
470/*
471 * Sysfs functions
472 */
473
474static ssize_t update_mode_show(struct device *dev,
475 struct device_attribute *attr, char *buf)
476{
477 struct fb_info *info = dev_get_drvdata(dev);
478 struct auok190xfb_par *par = info->par;
479
480 return sprintf(buf, "%d\n", par->update_mode);
481}
482
483static ssize_t update_mode_store(struct device *dev,
484 struct device_attribute *attr,
485 const char *buf, size_t count)
486{
487 struct fb_info *info = dev_get_drvdata(dev);
488 struct auok190xfb_par *par = info->par;
489 int mode, ret;
490
491 ret = kstrtoint(buf, 10, &mode);
492 if (ret)
493 return ret;
494
495 par->update_mode = mode;
496
497 /* if we enter a better mode, do a full update */
498 if (par->last_mode > 1 && mode < par->last_mode)
499 par->update_all(par);
500
501 return count;
502}
503
504static ssize_t flash_show(struct device *dev, struct device_attribute *attr,
505 char *buf)
506{
507 struct fb_info *info = dev_get_drvdata(dev);
508 struct auok190xfb_par *par = info->par;
509
510 return sprintf(buf, "%d\n", par->flash);
511}
512
513static ssize_t flash_store(struct device *dev, struct device_attribute *attr,
514 const char *buf, size_t count)
515{
516 struct fb_info *info = dev_get_drvdata(dev);
517 struct auok190xfb_par *par = info->par;
518 int flash, ret;
519
520 ret = kstrtoint(buf, 10, &flash);
521 if (ret)
522 return ret;
523
524 if (flash > 0)
525 par->flash = 1;
526 else
527 par->flash = 0;
528
529 return count;
530}
531
532static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
533 char *buf)
534{
535 struct fb_info *info = dev_get_drvdata(dev);
536 struct auok190xfb_par *par = info->par;
537 int temp;
538
539 temp = auok190x_read_temperature(par);
540 return sprintf(buf, "%d\n", temp);
541}
542
/* sysfs attributes: update_mode and flash are writable, temp read-only. */
static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store);
static DEVICE_ATTR(flash, 0644, flash_show, flash_store);
static DEVICE_ATTR(temp, 0644, temp_show, NULL);

static struct attribute *auok190x_attributes[] = {
	&dev_attr_update_mode.attr,
	&dev_attr_flash.attr,
	&dev_attr_temp.attr,
	NULL
};

static const struct attribute_group auok190x_attr_group = {
	.attrs = auok190x_attributes,
};
557
558static int auok190x_power(struct auok190xfb_par *par, bool on)
559{
560 struct auok190x_board *board = par->board;
561 int ret;
562
563 if (on) {
564 /* We should maintain POWER up for at least 80ms before set
565 * RST_N and SLP_N to high (TCON spec 20100803_v35 p59)
566 */
567 ret = regulator_enable(par->regulator);
568 if (ret)
569 return ret;
570
571 msleep(200);
572 gpio_set_value(board->gpio_nrst, 1);
573 gpio_set_value(board->gpio_nsleep, 1);
574 msleep(200);
575 } else {
576 regulator_disable(par->regulator);
577 gpio_set_value(board->gpio_nrst, 0);
578 gpio_set_value(board->gpio_nsleep, 0);
579 }
580
581 return 0;
582}
583
584/*
585 * Recovery - powercycle the controller
586 */
587
/* Recover a hung controller by power-cycling it and re-running the
 * controller specific init sequence.
 */
static void auok190x_recover(struct auok190xfb_par *par)
{
	auok190x_power(par, 0);
	msleep(100);
	auok190x_power(par, 1);

	/* controller specific re-initialisation */
	par->init(par);

	/* wait for init to complete */
	par->board->wait_for_rdy(par);
}
599
600/*
601 * Power-management
602 */
603
604#ifdef CONFIG_PM
/* runtime-pm suspend: put the controller into standby where supported.
 *
 * Deliberately takes io_lock and does NOT release it - the matching
 * unlock happens in auok190x_runtime_resume(), which serializes all
 * controller I/O against the suspended state.
 */
static int auok190x_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fb_info *info = platform_get_drvdata(pdev);
	struct auok190xfb_par *par = info->par;
	struct auok190x_board *board = par->board;
	u16 standby_param;

	/* take and keep the lock until we are resumed, as the controller
	 * will never reach the non-busy state when in standby mode
	 */
	mutex_lock(&(par->io_lock));

	if (par->standby) {
		dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n");
		mutex_unlock(&(par->io_lock));
		return 0;
	}

	/* according to runtime_pm.txt runtime_suspend only means, that the
	 * device will not process data and will not communicate with the CPU
	 * As we hold the lock, this stays true even without standby
	 */
	if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
		dev_dbg(dev, "runtime suspend without standby\n");
		goto finish;
	} else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) {
		/* for some TCON versions STANDBY expects a parameter (0) but
		 * it seems the real tcon version has to be determined yet.
		 */
		dev_dbg(dev, "runtime suspend with additional empty param\n");
		standby_param = 0;
		auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1,
				      &standby_param);
	} else {
		dev_dbg(dev, "runtime suspend without param\n");
		auok190x_send_command(par, AUOK190X_CMD_STANDBY);
	}

	/* NOTE(review): presumably gives the controller time to enter
	 * standby - confirm the 64ms figure against the TCON spec.
	 */
	msleep(64);

finish:
	par->standby = 1;

	return 0;
}
651
/* runtime-pm resume: wake the controller from standby and release the
 * io_lock that auok190x_runtime_suspend() left held.
 */
static int auok190x_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fb_info *info = platform_get_drvdata(pdev);
	struct auok190xfb_par *par = info->par;
	struct auok190x_board *board = par->board;

	if (!par->standby) {
		dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n");
		return 0;
	}

	if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
		dev_dbg(dev, "runtime resume without standby\n");
	} else {
		/* when in standby, controller is always busy
		 * and only accepts the wakeup command
		 */
		dev_dbg(dev, "runtime resume from standby\n");
		auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP);

		/* NOTE(review): wakeup settle time - confirm against spec */
		msleep(160);

		/* wait for the controller to be ready and release the lock */
		board->wait_for_rdy(par);
	}

	par->standby = 0;

	mutex_unlock(&(par->io_lock));

	return 0;
}
685
/* System suspend: either power the controller off entirely (broken
 * standby) or enter the SLEEP state via standby plus the SLP_N gpio.
 * Sleep can only be entered from standby, so the device is runtime
 * suspended manually here when it was still active.
 */
static int auok190x_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fb_info *info = platform_get_drvdata(pdev);
	struct auok190xfb_par *par = info->par;
	struct auok190x_board *board = par->board;
	int ret;

	dev_dbg(dev, "suspend\n");
	if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
		/* suspend via powering off the ic */
		dev_dbg(dev, "suspend with broken standby\n");

		auok190x_power(par, 0);
	} else {
		dev_dbg(dev, "suspend using sleep\n");

		/* the sleep state can only be entered from the standby state.
		 * pm_runtime_get_noresume gets called before the suspend call.
		 * So the devices usage count is >0 but it is not necessarily
		 * active.
		 */
		if (!pm_runtime_status_suspended(dev)) {
			ret = auok190x_runtime_suspend(dev);
			if (ret < 0) {
				dev_err(dev, "auok190x_runtime_suspend failed with %d\n",
					ret);
				return ret;
			}
			/* remember this for the resume path */
			par->manual_standby = 1;
		}

		gpio_direction_output(board->gpio_nsleep, 0);
	}

	msleep(100);

	return 0;
}
725
/* System resume: mirror of auok190x_suspend(). Powers the controller
 * back on (broken standby) or brings it out of sleep, re-inits it, and
 * restores the pre-suspend runtime-pm state.
 */
static int auok190x_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fb_info *info = platform_get_drvdata(pdev);
	struct auok190xfb_par *par = info->par;
	struct auok190x_board *board = par->board;

	dev_dbg(dev, "resume\n");
	if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
		dev_dbg(dev, "resume with broken standby\n");

		auok190x_power(par, 1);

		par->init(par);
	} else {
		dev_dbg(dev, "resume from sleep\n");

		/* device should be in runtime suspend when we were suspended
		 * and pm_runtime_put_sync gets called after this function.
		 * So there is no need to touch the standby mode here at all.
		 */
		gpio_direction_output(board->gpio_nsleep, 1);
		msleep(100);

		/* an additional init call seems to be necessary after sleep */
		auok190x_runtime_resume(dev);
		par->init(par);

		/* if we were runtime-suspended before, suspend again*/
		if (!par->manual_standby)
			auok190x_runtime_suspend(dev);
		else
			par->manual_standby = 0;
	}

	return 0;
}
763#endif
764
/* Shared PM ops for both K1900 and K1901 platform drivers; the runtime
 * callbacks implement standby handling, the sleep callbacks full
 * system suspend/resume.
 */
const struct dev_pm_ops auok190x_pm = {
	SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
};
EXPORT_SYMBOL_GPL(auok190x_pm);
771
772/*
773 * Common probe and remove code
774 */
775
/* Common probe for both K190X controller drivers.
 *
 * Allocates the fb_info, powers up and identifies the controller, sets
 * up deferred io and runtime-pm, and registers the framebuffer plus its
 * sysfs attributes. On failure everything is unwound in reverse order
 * through the goto chain at the bottom; the goto labels mirror the
 * setup order exactly.
 *
 * Returns 0 on success or a negative errno.
 */
int __devinit auok190x_common_probe(struct platform_device *pdev,
				    struct auok190x_init_data *init)
{
	struct auok190x_board *board = init->board;
	struct auok190xfb_par *par;
	struct fb_info *info;
	struct panel_info *panel;
	int videomemorysize, ret;
	unsigned char *videomemory;

	/* check board contents */
	if (!board->init || !board->cleanup || !board->wait_for_rdy
	    || !board->set_ctl || !board->set_hdb || !board->get_hdb
	    || !board->setup_irq)
		return -EINVAL;

	info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev);
	if (!info)
		return -ENOMEM;

	par = info->par;
	par->info = info;
	par->board = board;
	par->recover = auok190x_recover;
	par->update_partial = init->update_partial;
	par->update_all = init->update_all;
	par->need_refresh = init->need_refresh;
	par->init = init->init;

	/* init update modes */
	par->update_cnt = 0;
	par->update_mode = -1;
	par->last_mode = -1;
	par->flash = 0;

	par->regulator = regulator_get(info->device, "vdd");
	if (IS_ERR(par->regulator)) {
		ret = PTR_ERR(par->regulator);
		dev_err(info->device, "Failed to get regulator: %d\n", ret);
		goto err_reg;
	}

	ret = board->init(par);
	if (ret) {
		dev_err(info->device, "board init failed, %d\n", ret);
		goto err_board;
	}

	ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep");
	if (ret) {
		dev_err(info->device, "could not request sleep gpio, %d\n",
			ret);
		goto err_gpio1;
	}

	ret = gpio_direction_output(board->gpio_nsleep, 0);
	if (ret) {
		dev_err(info->device, "could not set sleep gpio, %d\n", ret);
		goto err_gpio2;
	}

	ret = gpio_request(board->gpio_nrst, "AUOK190x reset");
	if (ret) {
		dev_err(info->device, "could not request reset gpio, %d\n",
			ret);
		goto err_gpio2;
	}

	ret = gpio_direction_output(board->gpio_nrst, 0);
	if (ret) {
		dev_err(info->device, "could not set reset gpio, %d\n", ret);
		goto err_gpio3;
	}

	ret = auok190x_power(par, 1);
	if (ret) {
		dev_err(info->device, "could not power on the device, %d\n",
			ret);
		goto err_gpio3;
	}

	mutex_init(&par->io_lock);

	init_waitqueue_head(&par->waitq);

	ret = par->board->setup_irq(par->info);
	if (ret) {
		dev_err(info->device, "could not setup ready-irq, %d\n", ret);
		goto err_irq;
	}

	/* wait for init to complete */
	par->board->wait_for_rdy(par);

	/*
	 * From here on the controller can talk to us
	 */

	/* initialise fix, var, resolution and rotation */

	strlcpy(info->fix.id, init->id, 16);
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
	info->fix.xpanstep = 0;
	info->fix.ypanstep = 0;
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;

	info->var.bits_per_pixel = 8;
	info->var.grayscale = 1;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;

	panel = &panel_table[board->resolution];

	/* if 90 degree rotation, switch width and height */
	if (board->rotation & 1) {
		info->var.xres = panel->h;
		info->var.yres = panel->w;
		info->var.xres_virtual = panel->h;
		info->var.yres_virtual = panel->w;
		info->fix.line_length = panel->h;
	} else {
		info->var.xres = panel->w;
		info->var.yres = panel->h;
		info->var.xres_virtual = panel->w;
		info->var.yres_virtual = panel->h;
		info->fix.line_length = panel->w;
	}

	par->resolution = board->resolution;
	par->rotation = board->rotation;

	/* videomemory handling */

	videomemorysize = roundup((panel->w * panel->h), PAGE_SIZE);
	videomemory = vmalloc(videomemorysize);
	if (!videomemory) {
		ret = -ENOMEM;
		goto err_irq;
	}

	memset(videomemory, 0, videomemorysize);
	info->screen_base = (char *)videomemory;
	info->fix.smem_len = videomemorysize;

	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
	info->fbops = &auok190xfb_ops;

	/* deferred io init */

	info->fbdefio = devm_kzalloc(info->device,
				     sizeof(struct fb_deferred_io),
				     GFP_KERNEL);
	if (!info->fbdefio) {
		dev_err(info->device, "Failed to allocate memory\n");
		ret = -ENOMEM;
		goto err_defio;
	}

	dev_dbg(info->device, "targetting %d frames per second\n", board->fps);
	info->fbdefio->delay = HZ / board->fps;
	info->fbdefio->first_io = auok190xfb_dpy_first_io,
	info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io,
	fb_deferred_io_init(info);

	/* color map */

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret < 0) {
		dev_err(info->device, "Failed to allocate colormap\n");
		goto err_cmap;
	}

	/* controller init */

	par->consecutive_threshold = 100;
	par->init(par);
	auok190x_identify(par);

	platform_set_drvdata(pdev, info);

	ret = register_framebuffer(info);
	if (ret < 0)
		goto err_regfb;

	ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group);
	if (ret)
		goto err_sysfs;

	dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n",
		 info->node, info->var.xres, info->var.yres,
		 videomemorysize >> 10);

	/* increase autosuspend_delay when we use alternative methods
	 * for runtime_pm
	 */
	par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN)
					? 1000 : 200;

	pm_runtime_set_active(info->device);
	pm_runtime_enable(info->device);
	pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay);
	pm_runtime_use_autosuspend(info->device);

	return 0;

err_sysfs:
	unregister_framebuffer(info);
err_regfb:
	fb_dealloc_cmap(&info->cmap);
err_cmap:
	fb_deferred_io_cleanup(info);
	/* NOTE(review): info->fbdefio was allocated with devm_kzalloc()
	 * above, so this kfree looks like a double free once devres
	 * releases the same buffer - confirm and drop the kfree.
	 */
	kfree(info->fbdefio);
err_defio:
	vfree((void *)info->screen_base);
err_irq:
	auok190x_power(par, 0);
err_gpio3:
	gpio_free(board->gpio_nrst);
err_gpio2:
	gpio_free(board->gpio_nsleep);
err_gpio1:
	board->cleanup(par);
err_board:
	regulator_put(par->regulator);
err_reg:
	framebuffer_release(info);

	return ret;
}
EXPORT_SYMBOL_GPL(auok190x_common_probe);
1009
1010int __devexit auok190x_common_remove(struct platform_device *pdev)
1011{
1012 struct fb_info *info = platform_get_drvdata(pdev);
1013 struct auok190xfb_par *par = info->par;
1014 struct auok190x_board *board = par->board;
1015
1016 pm_runtime_disable(info->device);
1017
1018 sysfs_remove_group(&info->device->kobj, &auok190x_attr_group);
1019
1020 unregister_framebuffer(info);
1021
1022 fb_dealloc_cmap(&info->cmap);
1023
1024 fb_deferred_io_cleanup(info);
1025 kfree(info->fbdefio);
1026
1027 vfree((void *)info->screen_base);
1028
1029 auok190x_power(par, 0);
1030
1031 gpio_free(board->gpio_nrst);
1032 gpio_free(board->gpio_nsleep);
1033
1034 board->cleanup(par);
1035
1036 regulator_put(par->regulator);
1037
1038 framebuffer_release(info);
1039
1040 return 0;
1041}
1042EXPORT_SYMBOL_GPL(auok190x_common_remove);
1043
1044MODULE_DESCRIPTION("Common code for AUO-K190X controllers");
1045MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
1046MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.h b/drivers/video/auo_k190x.h
new file mode 100644
index 000000000000..e35af1f51b28
--- /dev/null
+++ b/drivers/video/auo_k190x.h
@@ -0,0 +1,129 @@
/*
 * Private common definitions for AUO-K190X framebuffer drivers
 *
 * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * I80 interface specific defines
 * (control-line identifiers passed to the board's set_ctl callback)
 */

#define AUOK190X_I80_CS			0x01	/* chip select */
#define AUOK190X_I80_DC			0x02	/* data/command select */
#define AUOK190X_I80_WR			0x03	/* write strobe */
#define AUOK190X_I80_OE			0x04	/* output enable (read) */

/*
 * AUOK190x commands, common to both controllers
 */

#define AUOK190X_CMD_INIT		0x0000
#define AUOK190X_CMD_STANDBY		0x0001
#define AUOK190X_CMD_WAKEUP		0x0002
#define AUOK190X_CMD_TCON_RESET		0x0003
#define AUOK190X_CMD_DATA_STOP		0x1002
#define AUOK190X_CMD_LUT_START		0x1003
#define AUOK190X_CMD_DISP_REFRESH	0x1004
#define AUOK190X_CMD_DISP_RESET		0x1005
#define AUOK190X_CMD_PRE_DISPLAY_START	0x100D
#define AUOK190X_CMD_PRE_DISPLAY_STOP	0x100F
#define AUOK190X_CMD_FLASH_W		0x2000
#define AUOK190X_CMD_FLASH_E		0x2001
#define AUOK190X_CMD_FLASH_STS		0x2002
#define AUOK190X_CMD_FRAMERATE		0x3000
#define AUOK190X_CMD_READ_VERSION	0x4000
#define AUOK190X_CMD_READ_STATUS	0x4001
#define AUOK190X_CMD_READ_LUT		0x4003
#define AUOK190X_CMD_DRIVERTIMING	0x5000
#define AUOK190X_CMD_LBALANCE		0x5001
#define AUOK190X_CMD_AGINGMODE		0x6000
#define AUOK190X_CMD_AGINGEXIT		0x6001

/*
 * Common settings for AUOK190X_CMD_INIT
 */

#define AUOK190X_INIT_DATA_FILTER	(0 << 12)
#define AUOK190X_INIT_DATA_BYPASS	(1 << 12)
#define AUOK190X_INIT_INVERSE_WHITE	(0 << 9)
#define AUOK190X_INIT_INVERSE_BLACK	(1 << 9)
#define AUOK190X_INIT_SCAN_DOWN		(0 << 1)
#define AUOK190X_INIT_SCAN_UP		(1 << 1)
#define AUOK190X_INIT_SHIFT_LEFT	(0 << 0)
#define AUOK190X_INIT_SHIFT_RIGHT	(1 << 0)

/* Common bits to pixels
 *   Mode	15-12	11-8	7-4	3-0
 *   format0	4	3	2	1
 *   format1	3	4	1	2
 */

#define AUOK190X_INIT_FORMAT0		0
#define AUOK190X_INIT_FORMAT1		(1 << 6)

/*
 * settings for AUOK190X_CMD_RESET
 */

#define AUOK190X_RESET_TCON		(0 << 0)
#define AUOK190X_RESET_NORMAL		(1 << 0)
#define AUOK190X_RESET_PON		(1 << 1)

/*
 * AUOK190X_CMD_VERSION
 * (field extraction helpers for the four result words)
 */

#define AUOK190X_VERSION_TEMP_MASK		(0x1ff)
#define AUOK190X_VERSION_EPD_MASK		(0xff)
#define AUOK190X_VERSION_SIZE_INT(_val)		((_val & 0xfc00) >> 10)
#define AUOK190X_VERSION_SIZE_FLOAT(_val)	((_val & 0x3c0) >> 6)
#define AUOK190X_VERSION_MODEL(_val)		(_val & 0x3f)
#define AUOK190X_VERSION_LUT(_val)		(_val & 0xff)
#define AUOK190X_VERSION_TCON(_val)		((_val & 0xff00) >> 8)

/*
 * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901
 */

#define AUOK190X_UPDATE_MODE(_res)	((_res & 0x7) << 12)
#define AUOK190X_UPDATE_NONFLASH	(1 << 15)

/*
 * track panel specific parameters for common init
 */

struct auok190x_init_data {
	char *id;			/* fb fix.id string */
	struct auok190x_board *board;	/* board specific callbacks */

	/* controller specific display update hooks */
	void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
	void (*update_all)(struct auok190xfb_par *par);
	bool (*need_refresh)(struct auok190xfb_par *par);
	void (*init)(struct auok190xfb_par *par);
};


extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data);
extern int auok190x_send_command(struct auok190xfb_par *par, u16 data);
extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
					 int argc, u16 *argv);
extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
				 int argc, u16 *argv);
extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par,
						u16 cmd, int argc, u16 *argv,
						int size, u16 *data);
extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
					int argc, u16 *argv, int size,
					u16 *data);
extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
				 int argc, u16 *argv);

extern int auok190x_common_probe(struct platform_device *pdev,
				 struct auok190x_init_data *init);
extern int auok190x_common_remove(struct platform_device *pdev);

extern const struct dev_pm_ops auok190x_pm;
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index fa2b03750316..2979292650d6 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -88,7 +88,7 @@ config LCD_PLATFORM
88 88
89config LCD_TOSA 89config LCD_TOSA
90 tristate "Sharp SL-6000 LCD Driver" 90 tristate "Sharp SL-6000 LCD Driver"
91 depends on SPI && MACH_TOSA 91 depends on I2C && SPI && MACH_TOSA
92 help 92 help
93 If you have an Sharp SL-6000 Zaurus say Y to enable a driver 93 If you have an Sharp SL-6000 Zaurus say Y to enable a driver
94 for its LCD. 94 for its LCD.
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 6c9399341bcf..9327cd1b3143 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -263,7 +263,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
263 263
264EXPORT_SYMBOL_GPL(ili9320_probe_spi); 264EXPORT_SYMBOL_GPL(ili9320_probe_spi);
265 265
266int __devexit ili9320_remove(struct ili9320 *ili) 266int ili9320_remove(struct ili9320 *ili)
267{ 267{
268 ili9320_power(ili, FB_BLANK_POWERDOWN); 268 ili9320_power(ili, FB_BLANK_POWERDOWN);
269 269
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 1a268a294478..9bdd4b0c18c8 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off,
353 353
354static int 354static int
355adv7393_write_proc(struct file *file, const char __user * buffer, 355adv7393_write_proc(struct file *file, const char __user * buffer,
356 unsigned long count, void *data) 356 size_t count, void *data)
357{ 357{
358 struct adv7393fb_device *fbdev = data; 358 struct adv7393fb_device *fbdev = data;
359 char line[8];
360 unsigned int val; 359 unsigned int val;
361 int ret; 360 int ret;
362 361
363 ret = copy_from_user(line, buffer, count); 362 ret = kstrtouint_from_user(buffer, count, 0, &val);
364 if (ret) 363 if (ret)
365 return -EFAULT; 364 return -EFAULT;
366 365
367 val = simple_strtoul(line, NULL, 0);
368 adv7393_write(fbdev->client, val >> 8, val & 0xff); 366 adv7393_write(fbdev->client, val >> 8, val & 0xff);
369 367
370 return count; 368 return count;
@@ -414,14 +412,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
414 if (ret) { 412 if (ret) {
415 dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n"); 413 dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
416 ret = -EBUSY; 414 ret = -EBUSY;
417 goto out_8; 415 goto free_fbdev;
418 } 416 }
419 } 417 }
420 418
421 if (peripheral_request_list(ppi_pins, DRIVER_NAME)) { 419 if (peripheral_request_list(ppi_pins, DRIVER_NAME)) {
422 dev_err(&client->dev, "requesting PPI peripheral failed\n"); 420 dev_err(&client->dev, "requesting PPI peripheral failed\n");
423 ret = -EFAULT; 421 ret = -EFAULT;
424 goto out_8; 422 goto free_gpio;
425 } 423 }
426 424
427 fbdev->fb_mem = 425 fbdev->fb_mem =
@@ -432,7 +430,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
432 dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n", 430 dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n",
433 (u32) fbdev->fb_len); 431 (u32) fbdev->fb_len);
434 ret = -ENOMEM; 432 ret = -ENOMEM;
435 goto out_7; 433 goto free_ppi_pins;
436 } 434 }
437 435
438 fbdev->info.screen_base = (void *)fbdev->fb_mem; 436 fbdev->info.screen_base = (void *)fbdev->fb_mem;
@@ -464,27 +462,27 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
464 if (!fbdev->info.pseudo_palette) { 462 if (!fbdev->info.pseudo_palette) {
465 dev_err(&client->dev, "failed to allocate pseudo_palette\n"); 463 dev_err(&client->dev, "failed to allocate pseudo_palette\n");
466 ret = -ENOMEM; 464 ret = -ENOMEM;
467 goto out_6; 465 goto free_fb_mem;
468 } 466 }
469 467
470 if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { 468 if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
471 dev_err(&client->dev, "failed to allocate colormap (%d entries)\n", 469 dev_err(&client->dev, "failed to allocate colormap (%d entries)\n",
472 BFIN_LCD_NBR_PALETTE_ENTRIES); 470 BFIN_LCD_NBR_PALETTE_ENTRIES);
473 ret = -EFAULT; 471 ret = -EFAULT;
474 goto out_5; 472 goto free_palette;
475 } 473 }
476 474
477 if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) { 475 if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) {
478 dev_err(&client->dev, "unable to request PPI DMA\n"); 476 dev_err(&client->dev, "unable to request PPI DMA\n");
479 ret = -EFAULT; 477 ret = -EFAULT;
480 goto out_4; 478 goto free_cmap;
481 } 479 }
482 480
483 if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0, 481 if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0,
484 "PPI ERROR", fbdev) < 0) { 482 "PPI ERROR", fbdev) < 0) {
485 dev_err(&client->dev, "unable to request PPI ERROR IRQ\n"); 483 dev_err(&client->dev, "unable to request PPI ERROR IRQ\n");
486 ret = -EFAULT; 484 ret = -EFAULT;
487 goto out_3; 485 goto free_ch_ppi;
488 } 486 }
489 487
490 fbdev->open = 0; 488 fbdev->open = 0;
@@ -494,14 +492,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
494 492
495 if (ret) { 493 if (ret) {
496 dev_err(&client->dev, "i2c attach: init error\n"); 494 dev_err(&client->dev, "i2c attach: init error\n");
497 goto out_1; 495 goto free_irq_ppi;
498 } 496 }
499 497
500 498
501 if (register_framebuffer(&fbdev->info) < 0) { 499 if (register_framebuffer(&fbdev->info) < 0) {
502 dev_err(&client->dev, "unable to register framebuffer\n"); 500 dev_err(&client->dev, "unable to register framebuffer\n");
503 ret = -EFAULT; 501 ret = -EFAULT;
504 goto out_1; 502 goto free_irq_ppi;
505 } 503 }
506 504
507 dev_info(&client->dev, "fb%d: %s frame buffer device\n", 505 dev_info(&client->dev, "fb%d: %s frame buffer device\n",
@@ -512,7 +510,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
512 if (!entry) { 510 if (!entry) {
513 dev_err(&client->dev, "unable to create /proc entry\n"); 511 dev_err(&client->dev, "unable to create /proc entry\n");
514 ret = -EFAULT; 512 ret = -EFAULT;
515 goto out_0; 513 goto free_fb;
516 } 514 }
517 515
518 entry->read_proc = adv7393_read_proc; 516 entry->read_proc = adv7393_read_proc;
@@ -521,22 +519,25 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
521 519
522 return 0; 520 return 0;
523 521
524 out_0: 522free_fb:
525 unregister_framebuffer(&fbdev->info); 523 unregister_framebuffer(&fbdev->info);
526 out_1: 524free_irq_ppi:
527 free_irq(IRQ_PPI_ERROR, fbdev); 525 free_irq(IRQ_PPI_ERROR, fbdev);
528 out_3: 526free_ch_ppi:
529 free_dma(CH_PPI); 527 free_dma(CH_PPI);
530 out_4: 528free_cmap:
531 dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
532 fbdev->dma_handle);
533 out_5:
534 fb_dealloc_cmap(&fbdev->info.cmap); 529 fb_dealloc_cmap(&fbdev->info.cmap);
535 out_6: 530free_palette:
536 kfree(fbdev->info.pseudo_palette); 531 kfree(fbdev->info.pseudo_palette);
537 out_7: 532free_fb_mem:
533 dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
534 fbdev->dma_handle);
535free_ppi_pins:
538 peripheral_free_list(ppi_pins); 536 peripheral_free_list(ppi_pins);
539 out_8: 537free_gpio:
538 if (ANOMALY_05000400)
539 gpio_free(P_IDENT(P_PPI0_FS3));
540free_fbdev:
540 kfree(fbdev); 541 kfree(fbdev);
541 542
542 return ret; 543 return ret;
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index 377dde3d5bfc..c95b417d0d41 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev)
1211 1211
1212static struct platform_driver broadsheetfb_driver = { 1212static struct platform_driver broadsheetfb_driver = {
1213 .probe = broadsheetfb_probe, 1213 .probe = broadsheetfb_probe,
1214 .remove = broadsheetfb_remove, 1214 .remove = __devexit_p(broadsheetfb_remove),
1215 .driver = { 1215 .driver = {
1216 .owner = THIS_MODULE, 1216 .owner = THIS_MODULE,
1217 .name = "broadsheetfb", 1217 .name = "broadsheetfb",
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index f56699d8122a..eae46f6457e2 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Cobalt server LCD frame buffer driver. 2 * Cobalt/SEAD3 LCD frame buffer driver.
3 * 3 *
4 * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> 4 * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
5 * Copyright (C) 2012 MIPS Technologies, Inc.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -62,6 +63,7 @@
62#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK) 63#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
63#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE) 64#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
64 65
66#ifdef CONFIG_MIPS_COBALT
65static inline void lcd_write_control(struct fb_info *info, u8 control) 67static inline void lcd_write_control(struct fb_info *info, u8 control)
66{ 68{
67 writel((u32)control << 24, info->screen_base); 69 writel((u32)control << 24, info->screen_base);
@@ -81,6 +83,47 @@ static inline u8 lcd_read_data(struct fb_info *info)
81{ 83{
82 return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24; 84 return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
83} 85}
86#else
87
88#define LCD_CTL 0x00
89#define LCD_DATA 0x08
90#define CPLD_STATUS 0x10
91#define CPLD_DATA 0x18
92
93static inline void cpld_wait(struct fb_info *info)
94{
95 do {
96 } while (readl(info->screen_base + CPLD_STATUS) & 1);
97}
98
99static inline void lcd_write_control(struct fb_info *info, u8 control)
100{
101 cpld_wait(info);
102 writel(control, info->screen_base + LCD_CTL);
103}
104
105static inline u8 lcd_read_control(struct fb_info *info)
106{
107 cpld_wait(info);
108 readl(info->screen_base + LCD_CTL);
109 cpld_wait(info);
110 return readl(info->screen_base + CPLD_DATA) & 0xff;
111}
112
113static inline void lcd_write_data(struct fb_info *info, u8 data)
114{
115 cpld_wait(info);
116 writel(data, info->screen_base + LCD_DATA);
117}
118
119static inline u8 lcd_read_data(struct fb_info *info)
120{
121 cpld_wait(info);
122 readl(info->screen_base + LCD_DATA);
123 cpld_wait(info);
124 return readl(info->screen_base + CPLD_DATA) & 0xff;
125}
126#endif
84 127
85static int lcd_busy_wait(struct fb_info *info) 128static int lcd_busy_wait(struct fb_info *info)
86{ 129{
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index c2d11fef114b..e2c96d01d8f5 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -224,5 +224,19 @@ config FONT_10x18
224 big letters. It fits between the sun 12x22 and the normal 8x16 font. 224 big letters. It fits between the sun 12x22 and the normal 8x16 font.
225 If other fonts are too big or too small for you, say Y, otherwise say N. 225 If other fonts are too big or too small for you, say Y, otherwise say N.
226 226
227config FONT_AUTOSELECT
228 def_bool y
229 depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
230 depends on !FONT_8x8
231 depends on !FONT_6x11
232 depends on !FONT_7x14
233 depends on !FONT_PEARL_8x8
234 depends on !FONT_ACORN_8x8
235 depends on !FONT_MINI_4x6
236 depends on !FONT_SUN8x16
237 depends on !FONT_SUN12x22
238 depends on !FONT_10x18
239 select FONT_8x16
240
227endmenu 241endmenu
228 242
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index f8babbeee275..345d96230978 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -507,16 +507,16 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
507 507
508 err = fb_alloc_cmap(&info->cmap, 256, 0); 508 err = fb_alloc_cmap(&info->cmap, 256, 0);
509 if (err) 509 if (err)
510 goto failed; 510 goto failed_cmap;
511 511
512 err = ep93xxfb_alloc_videomem(info); 512 err = ep93xxfb_alloc_videomem(info);
513 if (err) 513 if (err)
514 goto failed; 514 goto failed_videomem;
515 515
516 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 516 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
517 if (!res) { 517 if (!res) {
518 err = -ENXIO; 518 err = -ENXIO;
519 goto failed; 519 goto failed_resource;
520 } 520 }
521 521
522 /* 522 /*
@@ -532,7 +532,7 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
532 fbi->mmio_base = ioremap(res->start, resource_size(res)); 532 fbi->mmio_base = ioremap(res->start, resource_size(res));
533 if (!fbi->mmio_base) { 533 if (!fbi->mmio_base) {
534 err = -ENXIO; 534 err = -ENXIO;
535 goto failed; 535 goto failed_resource;
536 } 536 }
537 537
538 strcpy(info->fix.id, pdev->name); 538 strcpy(info->fix.id, pdev->name);
@@ -553,24 +553,24 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
553 if (err == 0) { 553 if (err == 0) {
554 dev_err(info->dev, "No suitable video mode found\n"); 554 dev_err(info->dev, "No suitable video mode found\n");
555 err = -EINVAL; 555 err = -EINVAL;
556 goto failed; 556 goto failed_mode;
557 } 557 }
558 558
559 if (mach_info->setup) { 559 if (mach_info->setup) {
560 err = mach_info->setup(pdev); 560 err = mach_info->setup(pdev);
561 if (err) 561 if (err)
562 return err; 562 goto failed_mode;
563 } 563 }
564 564
565 err = ep93xxfb_check_var(&info->var, info); 565 err = ep93xxfb_check_var(&info->var, info);
566 if (err) 566 if (err)
567 goto failed; 567 goto failed_check;
568 568
569 fbi->clk = clk_get(info->dev, NULL); 569 fbi->clk = clk_get(info->dev, NULL);
570 if (IS_ERR(fbi->clk)) { 570 if (IS_ERR(fbi->clk)) {
571 err = PTR_ERR(fbi->clk); 571 err = PTR_ERR(fbi->clk);
572 fbi->clk = NULL; 572 fbi->clk = NULL;
573 goto failed; 573 goto failed_check;
574 } 574 }
575 575
576 ep93xxfb_set_par(info); 576 ep93xxfb_set_par(info);
@@ -585,15 +585,17 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
585 return 0; 585 return 0;
586 586
587failed: 587failed:
588 if (fbi->clk) 588 clk_put(fbi->clk);
589 clk_put(fbi->clk); 589failed_check:
590 if (fbi->mmio_base)
591 iounmap(fbi->mmio_base);
592 ep93xxfb_dealloc_videomem(info);
593 if (&info->cmap)
594 fb_dealloc_cmap(&info->cmap);
595 if (fbi->mach_info->teardown) 590 if (fbi->mach_info->teardown)
596 fbi->mach_info->teardown(pdev); 591 fbi->mach_info->teardown(pdev);
592failed_mode:
593 iounmap(fbi->mmio_base);
594failed_resource:
595 ep93xxfb_dealloc_videomem(info);
596failed_videomem:
597 fb_dealloc_cmap(&info->cmap);
598failed_cmap:
597 kfree(info); 599 kfree(info);
598 platform_set_drvdata(pdev, NULL); 600 platform_set_drvdata(pdev, NULL);
599 601
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index 2a4481cf260c..a36b2d28280e 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -21,14 +21,14 @@
21 21
22#include <video/exynos_dp.h> 22#include <video/exynos_dp.h>
23 23
24#include <plat/cpu.h>
25
26#include "exynos_dp_core.h" 24#include "exynos_dp_core.h"
27 25
28static int exynos_dp_init_dp(struct exynos_dp_device *dp) 26static int exynos_dp_init_dp(struct exynos_dp_device *dp)
29{ 27{
30 exynos_dp_reset(dp); 28 exynos_dp_reset(dp);
31 29
30 exynos_dp_swreset(dp);
31
32 /* SW defined function Normal operation */ 32 /* SW defined function Normal operation */
33 exynos_dp_enable_sw_function(dp); 33 exynos_dp_enable_sw_function(dp);
34 34
@@ -478,7 +478,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
478 int lane_count; 478 int lane_count;
479 u8 buf[5]; 479 u8 buf[5];
480 480
481 u8 *adjust_request; 481 u8 adjust_request[2];
482 u8 voltage_swing; 482 u8 voltage_swing;
483 u8 pre_emphasis; 483 u8 pre_emphasis;
484 u8 training_lane; 484 u8 training_lane;
@@ -493,8 +493,8 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
493 /* set training pattern 2 for EQ */ 493 /* set training pattern 2 for EQ */
494 exynos_dp_set_training_pattern(dp, TRAINING_PTN2); 494 exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
495 495
496 adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1 496 adjust_request[0] = link_status[4];
497 - DPCD_ADDR_LANE0_1_STATUS); 497 adjust_request[1] = link_status[5];
498 498
499 exynos_dp_get_adjust_train(dp, adjust_request); 499 exynos_dp_get_adjust_train(dp, adjust_request);
500 500
@@ -566,7 +566,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
566 u8 buf[5]; 566 u8 buf[5];
567 u32 reg; 567 u32 reg;
568 568
569 u8 *adjust_request; 569 u8 adjust_request[2];
570 570
571 udelay(400); 571 udelay(400);
572 572
@@ -575,8 +575,8 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
575 lane_count = dp->link_train.lane_count; 575 lane_count = dp->link_train.lane_count;
576 576
577 if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) { 577 if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
578 adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1 578 adjust_request[0] = link_status[4];
579 - DPCD_ADDR_LANE0_1_STATUS); 579 adjust_request[1] = link_status[5];
580 580
581 if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) { 581 if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) {
582 /* traing pattern Set to Normal */ 582 /* traing pattern Set to Normal */
@@ -770,7 +770,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
770 return -ETIMEDOUT; 770 return -ETIMEDOUT;
771 } 771 }
772 772
773 mdelay(100); 773 udelay(1);
774 } 774 }
775 775
776 /* Set to use the register calculated M/N video */ 776 /* Set to use the register calculated M/N video */
@@ -804,7 +804,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
804 return -ETIMEDOUT; 804 return -ETIMEDOUT;
805 } 805 }
806 806
807 mdelay(100); 807 mdelay(1);
808 } 808 }
809 809
810 if (retval != 0) 810 if (retval != 0)
@@ -860,7 +860,8 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
860 return -EINVAL; 860 return -EINVAL;
861 } 861 }
862 862
863 dp = kzalloc(sizeof(struct exynos_dp_device), GFP_KERNEL); 863 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
864 GFP_KERNEL);
864 if (!dp) { 865 if (!dp) {
865 dev_err(&pdev->dev, "no memory for device data\n"); 866 dev_err(&pdev->dev, "no memory for device data\n");
866 return -ENOMEM; 867 return -ENOMEM;
@@ -871,8 +872,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
871 dp->clock = clk_get(&pdev->dev, "dp"); 872 dp->clock = clk_get(&pdev->dev, "dp");
872 if (IS_ERR(dp->clock)) { 873 if (IS_ERR(dp->clock)) {
873 dev_err(&pdev->dev, "failed to get clock\n"); 874 dev_err(&pdev->dev, "failed to get clock\n");
874 ret = PTR_ERR(dp->clock); 875 return PTR_ERR(dp->clock);
875 goto err_dp;
876 } 876 }
877 877
878 clk_enable(dp->clock); 878 clk_enable(dp->clock);
@@ -884,35 +884,25 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
884 goto err_clock; 884 goto err_clock;
885 } 885 }
886 886
887 res = request_mem_region(res->start, resource_size(res), 887 dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
888 dev_name(&pdev->dev));
889 if (!res) {
890 dev_err(&pdev->dev, "failed to request registers region\n");
891 ret = -EINVAL;
892 goto err_clock;
893 }
894
895 dp->res = res;
896
897 dp->reg_base = ioremap(res->start, resource_size(res));
898 if (!dp->reg_base) { 888 if (!dp->reg_base) {
899 dev_err(&pdev->dev, "failed to ioremap\n"); 889 dev_err(&pdev->dev, "failed to ioremap\n");
900 ret = -ENOMEM; 890 ret = -ENOMEM;
901 goto err_req_region; 891 goto err_clock;
902 } 892 }
903 893
904 dp->irq = platform_get_irq(pdev, 0); 894 dp->irq = platform_get_irq(pdev, 0);
905 if (!dp->irq) { 895 if (!dp->irq) {
906 dev_err(&pdev->dev, "failed to get irq\n"); 896 dev_err(&pdev->dev, "failed to get irq\n");
907 ret = -ENODEV; 897 ret = -ENODEV;
908 goto err_ioremap; 898 goto err_clock;
909 } 899 }
910 900
911 ret = request_irq(dp->irq, exynos_dp_irq_handler, 0, 901 ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
912 "exynos-dp", dp); 902 "exynos-dp", dp);
913 if (ret) { 903 if (ret) {
914 dev_err(&pdev->dev, "failed to request irq\n"); 904 dev_err(&pdev->dev, "failed to request irq\n");
915 goto err_ioremap; 905 goto err_clock;
916 } 906 }
917 907
918 dp->video_info = pdata->video_info; 908 dp->video_info = pdata->video_info;
@@ -924,7 +914,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
924 ret = exynos_dp_detect_hpd(dp); 914 ret = exynos_dp_detect_hpd(dp);
925 if (ret) { 915 if (ret) {
926 dev_err(&pdev->dev, "unable to detect hpd\n"); 916 dev_err(&pdev->dev, "unable to detect hpd\n");
927 goto err_irq; 917 goto err_clock;
928 } 918 }
929 919
930 exynos_dp_handle_edid(dp); 920 exynos_dp_handle_edid(dp);
@@ -933,7 +923,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
933 dp->video_info->link_rate); 923 dp->video_info->link_rate);
934 if (ret) { 924 if (ret) {
935 dev_err(&pdev->dev, "unable to do link train\n"); 925 dev_err(&pdev->dev, "unable to do link train\n");
936 goto err_irq; 926 goto err_clock;
937 } 927 }
938 928
939 exynos_dp_enable_scramble(dp, 1); 929 exynos_dp_enable_scramble(dp, 1);
@@ -947,23 +937,15 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
947 ret = exynos_dp_config_video(dp, dp->video_info); 937 ret = exynos_dp_config_video(dp, dp->video_info);
948 if (ret) { 938 if (ret) {
949 dev_err(&pdev->dev, "unable to config video\n"); 939 dev_err(&pdev->dev, "unable to config video\n");
950 goto err_irq; 940 goto err_clock;
951 } 941 }
952 942
953 platform_set_drvdata(pdev, dp); 943 platform_set_drvdata(pdev, dp);
954 944
955 return 0; 945 return 0;
956 946
957err_irq:
958 free_irq(dp->irq, dp);
959err_ioremap:
960 iounmap(dp->reg_base);
961err_req_region:
962 release_mem_region(res->start, resource_size(res));
963err_clock: 947err_clock:
964 clk_put(dp->clock); 948 clk_put(dp->clock);
965err_dp:
966 kfree(dp);
967 949
968 return ret; 950 return ret;
969} 951}
@@ -976,16 +958,9 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev)
976 if (pdata && pdata->phy_exit) 958 if (pdata && pdata->phy_exit)
977 pdata->phy_exit(); 959 pdata->phy_exit();
978 960
979 free_irq(dp->irq, dp);
980 iounmap(dp->reg_base);
981
982 clk_disable(dp->clock); 961 clk_disable(dp->clock);
983 clk_put(dp->clock); 962 clk_put(dp->clock);
984 963
985 release_mem_region(dp->res->start, resource_size(dp->res));
986
987 kfree(dp);
988
989 return 0; 964 return 0;
990} 965}
991 966
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 90ceaca0fa24..1e0f998e0c9f 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -26,7 +26,6 @@ struct link_train {
26 26
27struct exynos_dp_device { 27struct exynos_dp_device {
28 struct device *dev; 28 struct device *dev;
29 struct resource *res;
30 struct clk *clock; 29 struct clk *clock;
31 unsigned int irq; 30 unsigned int irq;
32 void __iomem *reg_base; 31 void __iomem *reg_base;
@@ -39,8 +38,10 @@ struct exynos_dp_device {
39void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable); 38void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
40void exynos_dp_stop_video(struct exynos_dp_device *dp); 39void exynos_dp_stop_video(struct exynos_dp_device *dp);
41void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable); 40void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
41void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
42void exynos_dp_init_interrupt(struct exynos_dp_device *dp); 42void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
43void exynos_dp_reset(struct exynos_dp_device *dp); 43void exynos_dp_reset(struct exynos_dp_device *dp);
44void exynos_dp_swreset(struct exynos_dp_device *dp);
44void exynos_dp_config_interrupt(struct exynos_dp_device *dp); 45void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
45u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp); 46u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
46void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable); 47void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 6548afa0e3d2..6ce76d56c3a1 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -16,8 +16,6 @@
16 16
17#include <video/exynos_dp.h> 17#include <video/exynos_dp.h>
18 18
19#include <plat/cpu.h>
20
21#include "exynos_dp_core.h" 19#include "exynos_dp_core.h"
22#include "exynos_dp_reg.h" 20#include "exynos_dp_reg.h"
23 21
@@ -65,6 +63,28 @@ void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
65 writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP); 63 writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
66} 64}
67 65
66void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
67{
68 u32 reg;
69
70 reg = TX_TERMINAL_CTRL_50_OHM;
71 writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
72
73 reg = SEL_24M | TX_DVDD_BIT_1_0625V;
74 writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
75
76 reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
77 writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
78
79 reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
80 TX_CUR1_2X | TX_CUR_8_MA;
81 writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
82
83 reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
84 CH1_AMP_400_MV | CH0_AMP_400_MV;
85 writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
86}
87
68void exynos_dp_init_interrupt(struct exynos_dp_device *dp) 88void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
69{ 89{
70 /* Set interrupt pin assertion polarity as high */ 90 /* Set interrupt pin assertion polarity as high */
@@ -89,8 +109,6 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
89{ 109{
90 u32 reg; 110 u32 reg;
91 111
92 writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
93
94 exynos_dp_stop_video(dp); 112 exynos_dp_stop_video(dp);
95 exynos_dp_enable_video_mute(dp, 0); 113 exynos_dp_enable_video_mute(dp, 0);
96 114
@@ -131,9 +149,15 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
131 149
132 writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); 150 writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
133 151
152 exynos_dp_init_analog_param(dp);
134 exynos_dp_init_interrupt(dp); 153 exynos_dp_init_interrupt(dp);
135} 154}
136 155
156void exynos_dp_swreset(struct exynos_dp_device *dp)
157{
158 writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
159}
160
137void exynos_dp_config_interrupt(struct exynos_dp_device *dp) 161void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
138{ 162{
139 u32 reg; 163 u32 reg;
@@ -271,6 +295,7 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
271void exynos_dp_init_analog_func(struct exynos_dp_device *dp) 295void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
272{ 296{
273 u32 reg; 297 u32 reg;
298 int timeout_loop = 0;
274 299
275 exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); 300 exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
276 301
@@ -282,9 +307,19 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
282 writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL); 307 writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
283 308
284 /* Power up PLL */ 309 /* Power up PLL */
285 if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) 310 if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
286 exynos_dp_set_pll_power_down(dp, 0); 311 exynos_dp_set_pll_power_down(dp, 0);
287 312
313 while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
314 timeout_loop++;
315 if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
316 dev_err(dp->dev, "failed to get pll lock status\n");
317 return;
318 }
319 usleep_range(10, 20);
320 }
321 }
322
288 /* Enable Serdes FIFO function and Link symbol clock domain module */ 323 /* Enable Serdes FIFO function and Link symbol clock domain module */
289 reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); 324 reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
290 reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N 325 reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h
index 42f608e2a43e..125b27cd57ae 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/video/exynos/exynos_dp_reg.h
@@ -24,6 +24,12 @@
24 24
25#define EXYNOS_DP_LANE_MAP 0x35C 25#define EXYNOS_DP_LANE_MAP 0x35C
26 26
27#define EXYNOS_DP_ANALOG_CTL_1 0x370
28#define EXYNOS_DP_ANALOG_CTL_2 0x374
29#define EXYNOS_DP_ANALOG_CTL_3 0x378
30#define EXYNOS_DP_PLL_FILTER_CTL_1 0x37C
31#define EXYNOS_DP_TX_AMP_TUNING_CTL 0x380
32
27#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390 33#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390
28 34
29#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4 35#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4
@@ -166,6 +172,29 @@
166#define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0) 172#define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0)
167#define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0) 173#define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0)
168 174
175/* EXYNOS_DP_ANALOG_CTL_1 */
176#define TX_TERMINAL_CTRL_50_OHM (0x1 << 4)
177
178/* EXYNOS_DP_ANALOG_CTL_2 */
179#define SEL_24M (0x1 << 3)
180#define TX_DVDD_BIT_1_0625V (0x4 << 0)
181
182/* EXYNOS_DP_ANALOG_CTL_3 */
183#define DRIVE_DVDD_BIT_1_0625V (0x4 << 5)
184#define VCO_BIT_600_MICRO (0x5 << 0)
185
186/* EXYNOS_DP_PLL_FILTER_CTL_1 */
187#define PD_RING_OSC (0x1 << 6)
188#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4)
189#define TX_CUR1_2X (0x1 << 2)
190#define TX_CUR_8_MA (0x2 << 0)
191
192/* EXYNOS_DP_TX_AMP_TUNING_CTL */
193#define CH3_AMP_400_MV (0x0 << 24)
194#define CH2_AMP_400_MV (0x0 << 16)
195#define CH1_AMP_400_MV (0x0 << 8)
196#define CH0_AMP_400_MV (0x0 << 0)
197
169/* EXYNOS_DP_AUX_HW_RETRY_CTL */ 198/* EXYNOS_DP_AUX_HW_RETRY_CTL */
170#define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8) 199#define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8)
171#define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3) 200#define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3)
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 557091dc0e97..6c1f5c314a42 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -58,7 +58,7 @@ static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device
58} 58}
59 59
60static struct regulator_bulk_data supplies[] = { 60static struct regulator_bulk_data supplies[] = {
61 { .supply = "vdd10", }, 61 { .supply = "vdd11", },
62 { .supply = "vdd18", }, 62 { .supply = "vdd18", },
63}; 63};
64 64
@@ -102,6 +102,8 @@ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
102 /* set display timing. */ 102 /* set display timing. */
103 exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config); 103 exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
104 104
105 exynos_mipi_dsi_init_interrupt(dsim);
106
105 /* 107 /*
106 * data from Display controller(FIMD) is transferred in video mode 108 * data from Display controller(FIMD) is transferred in video mode
107 * but in case of command mode, all settigs is updated to registers. 109 * but in case of command mode, all settigs is updated to registers.
@@ -413,27 +415,30 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
413 goto err_platform_get_irq; 415 goto err_platform_get_irq;
414 } 416 }
415 417
418 init_completion(&dsim_wr_comp);
419 init_completion(&dsim_rd_comp);
420 platform_set_drvdata(pdev, dsim);
421
416 ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler, 422 ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
417 IRQF_SHARED, pdev->name, dsim); 423 IRQF_SHARED, dev_name(&pdev->dev), dsim);
418 if (ret != 0) { 424 if (ret != 0) {
419 dev_err(&pdev->dev, "failed to request dsim irq\n"); 425 dev_err(&pdev->dev, "failed to request dsim irq\n");
420 ret = -EINVAL; 426 ret = -EINVAL;
421 goto err_bind; 427 goto err_bind;
422 } 428 }
423 429
424 init_completion(&dsim_wr_comp); 430 /* enable interrupts */
425 init_completion(&dsim_rd_comp);
426
427 /* enable interrupt */
428 exynos_mipi_dsi_init_interrupt(dsim); 431 exynos_mipi_dsi_init_interrupt(dsim);
429 432
430 /* initialize mipi-dsi client(lcd panel). */ 433 /* initialize mipi-dsi client(lcd panel). */
431 if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe) 434 if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
432 dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev); 435 dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
433 436
434 /* in case that mipi got enabled at bootloader. */ 437 /* in case mipi-dsi has been enabled by bootloader */
435 if (dsim_pd->enabled) 438 if (dsim_pd->enabled) {
436 goto out; 439 exynos_mipi_regulator_enable(dsim);
440 goto done;
441 }
437 442
438 /* lcd panel power on. */ 443 /* lcd panel power on. */
439 if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on) 444 if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
@@ -453,12 +458,11 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
453 458
454 dsim->suspended = false; 459 dsim->suspended = false;
455 460
456out: 461done:
457 platform_set_drvdata(pdev, dsim); 462 platform_set_drvdata(pdev, dsim);
458 463
459 dev_dbg(&pdev->dev, "mipi-dsi driver(%s mode) has been probed.\n", 464 dev_dbg(&pdev->dev, "%s() completed sucessfuly (%s mode)\n", __func__,
460 (dsim_config->e_interface == DSIM_COMMAND) ? 465 dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
461 "CPU" : "RGB");
462 466
463 return 0; 467 return 0;
464 468
@@ -515,10 +519,10 @@ static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev)
515 return 0; 519 return 0;
516} 520}
517 521
518#ifdef CONFIG_PM 522#ifdef CONFIG_PM_SLEEP
519static int exynos_mipi_dsi_suspend(struct platform_device *pdev, 523static int exynos_mipi_dsi_suspend(struct device *dev)
520 pm_message_t state)
521{ 524{
525 struct platform_device *pdev = to_platform_device(dev);
522 struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); 526 struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
523 struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; 527 struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
524 struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; 528 struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -544,8 +548,9 @@ static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
544 return 0; 548 return 0;
545} 549}
546 550
547static int exynos_mipi_dsi_resume(struct platform_device *pdev) 551static int exynos_mipi_dsi_resume(struct device *dev)
548{ 552{
553 struct platform_device *pdev = to_platform_device(dev);
549 struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); 554 struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
550 struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; 555 struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
551 struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; 556 struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -577,19 +582,19 @@ static int exynos_mipi_dsi_resume(struct platform_device *pdev)
577 582
578 return 0; 583 return 0;
579} 584}
580#else
581#define exynos_mipi_dsi_suspend NULL
582#define exynos_mipi_dsi_resume NULL
583#endif 585#endif
584 586
587static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
588 SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
589};
590
585static struct platform_driver exynos_mipi_dsi_driver = { 591static struct platform_driver exynos_mipi_dsi_driver = {
586 .probe = exynos_mipi_dsi_probe, 592 .probe = exynos_mipi_dsi_probe,
587 .remove = __devexit_p(exynos_mipi_dsi_remove), 593 .remove = __devexit_p(exynos_mipi_dsi_remove),
588 .suspend = exynos_mipi_dsi_suspend,
589 .resume = exynos_mipi_dsi_resume,
590 .driver = { 594 .driver = {
591 .name = "exynos-mipi-dsim", 595 .name = "exynos-mipi-dsim",
592 .owner = THIS_MODULE, 596 .owner = THIS_MODULE,
597 .pm = &exynos_mipi_dsi_pm_ops,
593 }, 598 },
594}; 599};
595 600
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 14909c1d3832..47b533a183be 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -76,33 +76,25 @@ static unsigned int dpll_table[15] = {
76 76
77irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id) 77irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
78{ 78{
79 unsigned int intsrc = 0; 79 struct mipi_dsim_device *dsim = dev_id;
80 unsigned int intmsk = 0; 80 unsigned int intsrc, intmsk;
81 struct mipi_dsim_device *dsim = NULL; 81
82 82 if (dsim == NULL) {
83 dsim = dev_id; 83 dev_err(dsim->dev, "%s: wrong parameter\n", __func__);
84 if (!dsim) { 84 return IRQ_NONE;
85 dev_dbg(dsim->dev, KERN_ERR "%s:error: wrong parameter\n",
86 __func__);
87 return IRQ_HANDLED;
88 } 85 }
89 86
90 intsrc = exynos_mipi_dsi_read_interrupt(dsim); 87 intsrc = exynos_mipi_dsi_read_interrupt(dsim);
91 intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim); 88 intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
89 intmsk = ~intmsk & intsrc;
92 90
93 intmsk = ~(intmsk) & intsrc; 91 if (intsrc & INTMSK_RX_DONE) {
94
95 switch (intmsk) {
96 case INTMSK_RX_DONE:
97 complete(&dsim_rd_comp); 92 complete(&dsim_rd_comp);
98 dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n"); 93 dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
99 break; 94 }
100 case INTMSK_FIFO_EMPTY: 95 if (intsrc & INTMSK_FIFO_EMPTY) {
101 complete(&dsim_wr_comp); 96 complete(&dsim_wr_comp);
102 dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n"); 97 dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
103 break;
104 default:
105 break;
106 } 98 }
107 99
108 exynos_mipi_dsi_clear_interrupt(dsim, intmsk); 100 exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
@@ -738,11 +730,11 @@ int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
738 if (dsim_config->auto_vertical_cnt == 0) { 730 if (dsim_config->auto_vertical_cnt == 0) {
739 exynos_mipi_dsi_set_main_disp_vporch(dsim, 731 exynos_mipi_dsi_set_main_disp_vporch(dsim,
740 dsim_config->cmd_allow, 732 dsim_config->cmd_allow,
741 timing->upper_margin, 733 timing->lower_margin,
742 timing->lower_margin); 734 timing->upper_margin);
743 exynos_mipi_dsi_set_main_disp_hporch(dsim, 735 exynos_mipi_dsi_set_main_disp_hporch(dsim,
744 timing->left_margin, 736 timing->right_margin,
745 timing->right_margin); 737 timing->left_margin);
746 exynos_mipi_dsi_set_main_disp_sync_area(dsim, 738 exynos_mipi_dsi_set_main_disp_sync_area(dsim,
747 timing->vsync_len, 739 timing->vsync_len,
748 timing->hsync_len); 740 timing->hsync_len);
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/exynos/s6e8ax0.c
index 4aa9ac6218bf..05d080b63bc0 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/exynos/s6e8ax0.c
@@ -293,9 +293,20 @@ static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd)
293 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0, 293 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
294 0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8 294 0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
295 }; 295 };
296 static const unsigned char data_to_send_panel_reverse[] = {
297 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
298 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
299 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
300 0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
301 };
296 302
297 ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, 303 if (lcd->dsim_dev->panel_reverse)
298 data_to_send, ARRAY_SIZE(data_to_send)); 304 ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
305 data_to_send_panel_reverse,
306 ARRAY_SIZE(data_to_send_panel_reverse));
307 else
308 ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
309 data_to_send, ARRAY_SIZE(data_to_send));
299} 310}
300 311
301static void s6e8ax0_display_cond(struct s6e8ax0 *lcd) 312static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index c27e153d8882..1ddeb11659d4 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -23,7 +23,7 @@
23#include <linux/rmap.h> 23#include <linux/rmap.h>
24#include <linux/pagemap.h> 24#include <linux/pagemap.h>
25 25
26struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) 26static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
27{ 27{
28 void *screen_base = (void __force *) info->screen_base; 28 void *screen_base = (void __force *) info->screen_base;
29 struct page *page; 29 struct page *page;
@@ -107,6 +107,10 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
107 /* protect against the workqueue changing the page list */ 107 /* protect against the workqueue changing the page list */
108 mutex_lock(&fbdefio->lock); 108 mutex_lock(&fbdefio->lock);
109 109
110 /* first write in this cycle, notify the driver */
111 if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
112 fbdefio->first_io(info);
113
110 /* 114 /*
111 * We want the page to remain locked from ->page_mkwrite until 115 * We want the page to remain locked from ->page_mkwrite until
112 * the PTE is marked dirty to avoid page_mkclean() being called 116 * the PTE is marked dirty to avoid page_mkclean() being called
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 67afa9c2289d..a55e3669d135 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(framebuffer_alloc);
80 */ 80 */
81void framebuffer_release(struct fb_info *info) 81void framebuffer_release(struct fb_info *info)
82{ 82{
83 if (!info)
84 return;
83 kfree(info->apertures); 85 kfree(info->apertures);
84 kfree(info); 86 kfree(info);
85} 87}
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 6af3f16754f0..458c00664ade 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -834,7 +834,6 @@ static void update_lcdc(struct fb_info *info)
834 diu_ops.set_pixel_clock(var->pixclock); 834 diu_ops.set_pixel_clock(var->pixclock);
835 835
836 out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */ 836 out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */
837 out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
838 out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */ 837 out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */
839 out_be32(&hw->plut, 0x01F5F666); 838 out_be32(&hw->plut, 0x01F5F666);
840 839
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 02fd2263610c..bdcbfbae2777 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -680,6 +680,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
680 + dinfo->fb.size); 680 + dinfo->fb.size);
681 if (!dinfo->aperture.virtual) { 681 if (!dinfo->aperture.virtual) {
682 ERR_MSG("Cannot remap FB region.\n"); 682 ERR_MSG("Cannot remap FB region.\n");
683 agp_backend_release(bridge);
683 cleanup(dinfo); 684 cleanup(dinfo);
684 return -ENODEV; 685 return -ENODEV;
685 } 686 }
@@ -689,6 +690,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
689 INTEL_REG_SIZE); 690 INTEL_REG_SIZE);
690 if (!dinfo->mmio_base) { 691 if (!dinfo->mmio_base) {
691 ERR_MSG("Cannot remap MMIO region.\n"); 692 ERR_MSG("Cannot remap MMIO region.\n");
693 agp_backend_release(bridge);
692 cleanup(dinfo); 694 cleanup(dinfo);
693 return -ENODEV; 695 return -ENODEV;
694 } 696 }
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c
index 273769bb8deb..c87e17afb3e2 100644
--- a/drivers/video/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/mb862xx/mb862xx-i2c.c
@@ -68,7 +68,7 @@ static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last)
68 return 1; 68 return 1;
69} 69}
70 70
71void mb862xx_i2c_stop(struct i2c_adapter *adap) 71static void mb862xx_i2c_stop(struct i2c_adapter *adap)
72{ 72{
73 struct mb862xxfb_par *par = adap->algo_data; 73 struct mb862xxfb_par *par = adap->algo_data;
74 74
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 11a7a333701d..00ce1f34b496 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -579,7 +579,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
579 579
580static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL); 580static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
581 581
582irqreturn_t mb862xx_intr(int irq, void *dev_id) 582static irqreturn_t mb862xx_intr(int irq, void *dev_id)
583{ 583{
584 struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id; 584 struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
585 unsigned long reg_ist, mask; 585 unsigned long reg_ist, mask;
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 55bf6196b7a0..85e4f44bfa61 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -950,7 +950,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
950 950
951 mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr, 951 mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
952 res_size(mfbi->fb_req)); 952 res_size(mfbi->fb_req));
953 if (!mfbi->reg_virt_addr) { 953 if (!mfbi->fb_virt_addr) {
954 dev_err(&dev->dev, "failed to ioremap frame buffer\n"); 954 dev_err(&dev->dev, "failed to ioremap frame buffer\n");
955 ret = -EINVAL; 955 ret = -EINVAL;
956 goto err4; 956 goto err4;
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
1045 1045
1046static struct platform_driver mbxfb_driver = { 1046static struct platform_driver mbxfb_driver = {
1047 .probe = mbxfb_probe, 1047 .probe = mbxfb_probe,
1048 .remove = mbxfb_remove, 1048 .remove = __devexit_p(mbxfb_remove),
1049 .suspend = mbxfb_suspend, 1049 .suspend = mbxfb_suspend,
1050 .resume = mbxfb_resume, 1050 .resume = mbxfb_resume,
1051 .driver = { 1051 .driver = {
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 6c6bc578d0fc..abbe691047bd 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -889,6 +889,18 @@ static int __devexit mxsfb_remove(struct platform_device *pdev)
889 return 0; 889 return 0;
890} 890}
891 891
892static void mxsfb_shutdown(struct platform_device *pdev)
893{
894 struct fb_info *fb_info = platform_get_drvdata(pdev);
895 struct mxsfb_info *host = to_imxfb_host(fb_info);
896
897 /*
898 * Force stop the LCD controller as keeping it running during reboot
899 * might interfere with the BootROM's boot mode pads sampling.
900 */
901 writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
902}
903
892static struct platform_device_id mxsfb_devtype[] = { 904static struct platform_device_id mxsfb_devtype[] = {
893 { 905 {
894 .name = "imx23-fb", 906 .name = "imx23-fb",
@@ -905,6 +917,7 @@ MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
905static struct platform_driver mxsfb_driver = { 917static struct platform_driver mxsfb_driver = {
906 .probe = mxsfb_probe, 918 .probe = mxsfb_probe,
907 .remove = __devexit_p(mxsfb_remove), 919 .remove = __devexit_p(mxsfb_remove),
920 .shutdown = mxsfb_shutdown,
908 .id_table = mxsfb_devtype, 921 .id_table = mxsfb_devtype,
909 .driver = { 922 .driver = {
910 .name = DRIVER_NAME, 923 .name = DRIVER_NAME,
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 1e7536d9a8fc..b48f95f0dfe2 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -39,14 +39,6 @@ config FB_OMAP_LCD_MIPID
39 the Mobile Industry Processor Interface DBI-C/DCS 39 the Mobile Industry Processor Interface DBI-C/DCS
40 specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3) 40 specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
41 41
42config FB_OMAP_BOOTLOADER_INIT
43 bool "Check bootloader initialization"
44 depends on FB_OMAP
45 help
46 Say Y here if you want to enable checking if the bootloader has
47 already initialized the display controller. In this case the
48 driver will skip the initialization.
49
50config FB_OMAP_CONSISTENT_DMA_SIZE 42config FB_OMAP_CONSISTENT_DMA_SIZE
51 int "Consistent DMA memory size (MB)" 43 int "Consistent DMA memory size (MB)"
52 depends on FB_OMAP 44 depends on FB_OMAP
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index 74e7cf078505..ad741c3d1ae1 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -739,12 +739,6 @@ static void acx_panel_set_timings(struct omap_dss_device *dssdev,
739 } 739 }
740} 740}
741 741
742static void acx_panel_get_timings(struct omap_dss_device *dssdev,
743 struct omap_video_timings *timings)
744{
745 *timings = dssdev->panel.timings;
746}
747
748static int acx_panel_check_timings(struct omap_dss_device *dssdev, 742static int acx_panel_check_timings(struct omap_dss_device *dssdev,
749 struct omap_video_timings *timings) 743 struct omap_video_timings *timings)
750{ 744{
@@ -762,7 +756,6 @@ static struct omap_dss_driver acx_panel_driver = {
762 .resume = acx_panel_resume, 756 .resume = acx_panel_resume,
763 757
764 .set_timings = acx_panel_set_timings, 758 .set_timings = acx_panel_set_timings,
765 .get_timings = acx_panel_get_timings,
766 .check_timings = acx_panel_check_timings, 759 .check_timings = acx_panel_check_timings,
767 760
768 .get_recommended_bpp = acx_get_recommended_bpp, 761 .get_recommended_bpp = acx_get_recommended_bpp,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 30fe4dfeb227..e42f9dc22123 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -386,6 +386,106 @@ static struct panel_config generic_dpi_panels[] = {
386 386
387 .name = "innolux_at080tn52", 387 .name = "innolux_at080tn52",
388 }, 388 },
389
390 /* Mitsubishi AA084SB01 */
391 {
392 {
393 .x_res = 800,
394 .y_res = 600,
395 .pixel_clock = 40000,
396
397 .hsw = 1,
398 .hfp = 254,
399 .hbp = 1,
400
401 .vsw = 1,
402 .vfp = 26,
403 .vbp = 1,
404 },
405 .config = OMAP_DSS_LCD_TFT,
406 .name = "mitsubishi_aa084sb01",
407 },
408 /* EDT ET0500G0DH6 */
409 {
410 {
411 .x_res = 800,
412 .y_res = 480,
413 .pixel_clock = 33260,
414
415 .hsw = 128,
416 .hfp = 216,
417 .hbp = 40,
418
419 .vsw = 2,
420 .vfp = 35,
421 .vbp = 10,
422 },
423 .config = OMAP_DSS_LCD_TFT,
424 .name = "edt_et0500g0dh6",
425 },
426
427 /* Prime-View PD050VL1 */
428 {
429 {
430 .x_res = 640,
431 .y_res = 480,
432
433 .pixel_clock = 25000,
434
435 .hsw = 96,
436 .hfp = 18,
437 .hbp = 46,
438
439 .vsw = 2,
440 .vfp = 10,
441 .vbp = 33,
442 },
443 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
444 OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
445 .name = "primeview_pd050vl1",
446 },
447
448 /* Prime-View PM070WL4 */
449 {
450 {
451 .x_res = 800,
452 .y_res = 480,
453
454 .pixel_clock = 32000,
455
456 .hsw = 128,
457 .hfp = 42,
458 .hbp = 86,
459
460 .vsw = 2,
461 .vfp = 10,
462 .vbp = 33,
463 },
464 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
465 OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
466 .name = "primeview_pm070wl4",
467 },
468
469 /* Prime-View PD104SLF */
470 {
471 {
472 .x_res = 800,
473 .y_res = 600,
474
475 .pixel_clock = 40000,
476
477 .hsw = 128,
478 .hfp = 42,
479 .hbp = 86,
480
481 .vsw = 4,
482 .vfp = 1,
483 .vbp = 23,
484 },
485 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
486 OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
487 .name = "primeview_pd104slf",
488 },
389}; 489};
390 490
391struct panel_drv_data { 491struct panel_drv_data {
@@ -549,12 +649,6 @@ static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
549 dpi_set_timings(dssdev, timings); 649 dpi_set_timings(dssdev, timings);
550} 650}
551 651
552static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
553 struct omap_video_timings *timings)
554{
555 *timings = dssdev->panel.timings;
556}
557
558static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev, 652static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
559 struct omap_video_timings *timings) 653 struct omap_video_timings *timings)
560{ 654{
@@ -571,7 +665,6 @@ static struct omap_dss_driver dpi_driver = {
571 .resume = generic_dpi_panel_resume, 665 .resume = generic_dpi_panel_resume,
572 666
573 .set_timings = generic_dpi_panel_set_timings, 667 .set_timings = generic_dpi_panel_set_timings,
574 .get_timings = generic_dpi_panel_get_timings,
575 .check_timings = generic_dpi_panel_check_timings, 668 .check_timings = generic_dpi_panel_check_timings,
576 669
577 .driver = { 670 .driver = {
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index dc9408dc93d1..4a34cdc1371b 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -610,12 +610,6 @@ static int n8x0_panel_resume(struct omap_dss_device *dssdev)
610 return 0; 610 return 0;
611} 611}
612 612
613static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
614 struct omap_video_timings *timings)
615{
616 *timings = dssdev->panel.timings;
617}
618
619static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev, 613static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
620 u16 *xres, u16 *yres) 614 u16 *xres, u16 *yres)
621{ 615{
@@ -678,8 +672,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
678 .get_resolution = n8x0_panel_get_resolution, 672 .get_resolution = n8x0_panel_get_resolution,
679 .get_recommended_bpp = omapdss_default_get_recommended_bpp, 673 .get_recommended_bpp = omapdss_default_get_recommended_bpp,
680 674
681 .get_timings = n8x0_panel_get_timings,
682
683 .driver = { 675 .driver = {
684 .name = "n8x0_panel", 676 .name = "n8x0_panel",
685 .owner = THIS_MODULE, 677 .owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index b2dd88b48420..901576eb5a84 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -30,7 +30,6 @@
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/regulator/consumer.h>
34#include <linux/mutex.h> 33#include <linux/mutex.h>
35 34
36#include <video/omapdss.h> 35#include <video/omapdss.h>
@@ -55,73 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
55 54
56static int taal_panel_reset(struct omap_dss_device *dssdev); 55static int taal_panel_reset(struct omap_dss_device *dssdev);
57 56
58struct panel_regulator {
59 struct regulator *regulator;
60 const char *name;
61 int min_uV;
62 int max_uV;
63};
64
65static void free_regulators(struct panel_regulator *regulators, int n)
66{
67 int i;
68
69 for (i = 0; i < n; i++) {
70 /* disable/put in reverse order */
71 regulator_disable(regulators[n - i - 1].regulator);
72 regulator_put(regulators[n - i - 1].regulator);
73 }
74}
75
76static int init_regulators(struct omap_dss_device *dssdev,
77 struct panel_regulator *regulators, int n)
78{
79 int r, i, v;
80
81 for (i = 0; i < n; i++) {
82 struct regulator *reg;
83
84 reg = regulator_get(&dssdev->dev, regulators[i].name);
85 if (IS_ERR(reg)) {
86 dev_err(&dssdev->dev, "failed to get regulator %s\n",
87 regulators[i].name);
88 r = PTR_ERR(reg);
89 goto err;
90 }
91
92 /* FIXME: better handling of fixed vs. variable regulators */
93 v = regulator_get_voltage(reg);
94 if (v < regulators[i].min_uV || v > regulators[i].max_uV) {
95 r = regulator_set_voltage(reg, regulators[i].min_uV,
96 regulators[i].max_uV);
97 if (r) {
98 dev_err(&dssdev->dev,
99 "failed to set regulator %s voltage\n",
100 regulators[i].name);
101 regulator_put(reg);
102 goto err;
103 }
104 }
105
106 r = regulator_enable(reg);
107 if (r) {
108 dev_err(&dssdev->dev, "failed to enable regulator %s\n",
109 regulators[i].name);
110 regulator_put(reg);
111 goto err;
112 }
113
114 regulators[i].regulator = reg;
115 }
116
117 return 0;
118
119err:
120 free_regulators(regulators, i);
121
122 return r;
123}
124
125/** 57/**
126 * struct panel_config - panel configuration 58 * struct panel_config - panel configuration
127 * @name: panel name 59 * @name: panel name
@@ -150,8 +82,6 @@ struct panel_config {
150 unsigned int low; 82 unsigned int low;
151 } reset_sequence; 83 } reset_sequence;
152 84
153 struct panel_regulator *regulators;
154 int num_regulators;
155}; 85};
156 86
157enum { 87enum {
@@ -577,12 +507,6 @@ static const struct backlight_ops taal_bl_ops = {
577 .update_status = taal_bl_update_status, 507 .update_status = taal_bl_update_status,
578}; 508};
579 509
580static void taal_get_timings(struct omap_dss_device *dssdev,
581 struct omap_video_timings *timings)
582{
583 *timings = dssdev->panel.timings;
584}
585
586static void taal_get_resolution(struct omap_dss_device *dssdev, 510static void taal_get_resolution(struct omap_dss_device *dssdev,
587 u16 *xres, u16 *yres) 511 u16 *xres, u16 *yres)
588{ 512{
@@ -602,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
602{ 526{
603 struct omap_dss_device *dssdev = to_dss_device(dev); 527 struct omap_dss_device *dssdev = to_dss_device(dev);
604 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 528 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
605 u8 errors; 529 u8 errors = 0;
606 int r; 530 int r;
607 531
608 mutex_lock(&td->lock); 532 mutex_lock(&td->lock);
@@ -977,11 +901,6 @@ static int taal_probe(struct omap_dss_device *dssdev)
977 901
978 atomic_set(&td->do_update, 0); 902 atomic_set(&td->do_update, 0);
979 903
980 r = init_regulators(dssdev, panel_config->regulators,
981 panel_config->num_regulators);
982 if (r)
983 goto err_reg;
984
985 td->workqueue = create_singlethread_workqueue("taal_esd"); 904 td->workqueue = create_singlethread_workqueue("taal_esd");
986 if (td->workqueue == NULL) { 905 if (td->workqueue == NULL) {
987 dev_err(&dssdev->dev, "can't create ESD workqueue\n"); 906 dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -1087,8 +1006,6 @@ err_bl:
1087err_rst_gpio: 1006err_rst_gpio:
1088 destroy_workqueue(td->workqueue); 1007 destroy_workqueue(td->workqueue);
1089err_wq: 1008err_wq:
1090 free_regulators(panel_config->regulators, panel_config->num_regulators);
1091err_reg:
1092 kfree(td); 1009 kfree(td);
1093err: 1010err:
1094 return r; 1011 return r;
@@ -1125,9 +1042,6 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
1125 /* reset, to be sure that the panel is in a valid state */ 1042 /* reset, to be sure that the panel is in a valid state */
1126 taal_hw_reset(dssdev); 1043 taal_hw_reset(dssdev);
1127 1044
1128 free_regulators(td->panel_config->regulators,
1129 td->panel_config->num_regulators);
1130
1131 if (gpio_is_valid(panel_data->reset_gpio)) 1045 if (gpio_is_valid(panel_data->reset_gpio))
1132 gpio_free(panel_data->reset_gpio); 1046 gpio_free(panel_data->reset_gpio);
1133 1047
@@ -1909,8 +1823,6 @@ static struct omap_dss_driver taal_driver = {
1909 .run_test = taal_run_test, 1823 .run_test = taal_run_test,
1910 .memory_read = taal_memory_read, 1824 .memory_read = taal_memory_read,
1911 1825
1912 .get_timings = taal_get_timings,
1913
1914 .driver = { 1826 .driver = {
1915 .name = "taal", 1827 .name = "taal",
1916 .owner = THIS_MODULE, 1828 .owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 52637fa8fda8..bff306e041ca 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -47,13 +47,9 @@ struct panel_drv_data {
47 struct mutex lock; 47 struct mutex lock;
48 48
49 int pd_gpio; 49 int pd_gpio;
50};
51 50
52static inline struct tfp410_platform_data 51 struct i2c_adapter *i2c_adapter;
53*get_pdata(const struct omap_dss_device *dssdev) 52};
54{
55 return dssdev->data;
56}
57 53
58static int tfp410_power_on(struct omap_dss_device *dssdev) 54static int tfp410_power_on(struct omap_dss_device *dssdev)
59{ 55{
@@ -68,7 +64,7 @@ static int tfp410_power_on(struct omap_dss_device *dssdev)
68 goto err0; 64 goto err0;
69 65
70 if (gpio_is_valid(ddata->pd_gpio)) 66 if (gpio_is_valid(ddata->pd_gpio))
71 gpio_set_value(ddata->pd_gpio, 1); 67 gpio_set_value_cansleep(ddata->pd_gpio, 1);
72 68
73 return 0; 69 return 0;
74err0: 70err0:
@@ -83,18 +79,18 @@ static void tfp410_power_off(struct omap_dss_device *dssdev)
83 return; 79 return;
84 80
85 if (gpio_is_valid(ddata->pd_gpio)) 81 if (gpio_is_valid(ddata->pd_gpio))
86 gpio_set_value(ddata->pd_gpio, 0); 82 gpio_set_value_cansleep(ddata->pd_gpio, 0);
87 83
88 omapdss_dpi_display_disable(dssdev); 84 omapdss_dpi_display_disable(dssdev);
89} 85}
90 86
91static int tfp410_probe(struct omap_dss_device *dssdev) 87static int tfp410_probe(struct omap_dss_device *dssdev)
92{ 88{
93 struct tfp410_platform_data *pdata = get_pdata(dssdev);
94 struct panel_drv_data *ddata; 89 struct panel_drv_data *ddata;
95 int r; 90 int r;
91 int i2c_bus_num;
96 92
97 ddata = kzalloc(sizeof(*ddata), GFP_KERNEL); 93 ddata = devm_kzalloc(&dssdev->dev, sizeof(*ddata), GFP_KERNEL);
98 if (!ddata) 94 if (!ddata)
99 return -ENOMEM; 95 return -ENOMEM;
100 96
@@ -104,10 +100,15 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
104 ddata->dssdev = dssdev; 100 ddata->dssdev = dssdev;
105 mutex_init(&ddata->lock); 101 mutex_init(&ddata->lock);
106 102
107 if (pdata) 103 if (dssdev->data) {
104 struct tfp410_platform_data *pdata = dssdev->data;
105
108 ddata->pd_gpio = pdata->power_down_gpio; 106 ddata->pd_gpio = pdata->power_down_gpio;
109 else 107 i2c_bus_num = pdata->i2c_bus_num;
108 } else {
110 ddata->pd_gpio = -1; 109 ddata->pd_gpio = -1;
110 i2c_bus_num = -1;
111 }
111 112
112 if (gpio_is_valid(ddata->pd_gpio)) { 113 if (gpio_is_valid(ddata->pd_gpio)) {
113 r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW, 114 r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW,
@@ -115,13 +116,31 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
115 if (r) { 116 if (r) {
116 dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n", 117 dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
117 ddata->pd_gpio); 118 ddata->pd_gpio);
118 ddata->pd_gpio = -1; 119 return r;
119 } 120 }
120 } 121 }
121 122
123 if (i2c_bus_num != -1) {
124 struct i2c_adapter *adapter;
125
126 adapter = i2c_get_adapter(i2c_bus_num);
127 if (!adapter) {
128 dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
129 i2c_bus_num);
130 r = -EINVAL;
131 goto err_i2c;
132 }
133
134 ddata->i2c_adapter = adapter;
135 }
136
122 dev_set_drvdata(&dssdev->dev, ddata); 137 dev_set_drvdata(&dssdev->dev, ddata);
123 138
124 return 0; 139 return 0;
140err_i2c:
141 if (gpio_is_valid(ddata->pd_gpio))
142 gpio_free(ddata->pd_gpio);
143 return r;
125} 144}
126 145
127static void __exit tfp410_remove(struct omap_dss_device *dssdev) 146static void __exit tfp410_remove(struct omap_dss_device *dssdev)
@@ -130,14 +149,15 @@ static void __exit tfp410_remove(struct omap_dss_device *dssdev)
130 149
131 mutex_lock(&ddata->lock); 150 mutex_lock(&ddata->lock);
132 151
152 if (ddata->i2c_adapter)
153 i2c_put_adapter(ddata->i2c_adapter);
154
133 if (gpio_is_valid(ddata->pd_gpio)) 155 if (gpio_is_valid(ddata->pd_gpio))
134 gpio_free(ddata->pd_gpio); 156 gpio_free(ddata->pd_gpio);
135 157
136 dev_set_drvdata(&dssdev->dev, NULL); 158 dev_set_drvdata(&dssdev->dev, NULL);
137 159
138 mutex_unlock(&ddata->lock); 160 mutex_unlock(&ddata->lock);
139
140 kfree(ddata);
141} 161}
142 162
143static int tfp410_enable(struct omap_dss_device *dssdev) 163static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -269,27 +289,17 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
269 u8 *edid, int len) 289 u8 *edid, int len)
270{ 290{
271 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); 291 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
272 struct tfp410_platform_data *pdata = get_pdata(dssdev);
273 struct i2c_adapter *adapter;
274 int r, l, bytes_read; 292 int r, l, bytes_read;
275 293
276 mutex_lock(&ddata->lock); 294 mutex_lock(&ddata->lock);
277 295
278 if (pdata->i2c_bus_num == 0) { 296 if (!ddata->i2c_adapter) {
279 r = -ENODEV; 297 r = -ENODEV;
280 goto err; 298 goto err;
281 } 299 }
282 300
283 adapter = i2c_get_adapter(pdata->i2c_bus_num);
284 if (!adapter) {
285 dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
286 pdata->i2c_bus_num);
287 r = -EINVAL;
288 goto err;
289 }
290
291 l = min(EDID_LENGTH, len); 301 l = min(EDID_LENGTH, len);
292 r = tfp410_ddc_read(adapter, edid, l, 0); 302 r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0);
293 if (r) 303 if (r)
294 goto err; 304 goto err;
295 305
@@ -299,7 +309,7 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
299 if (len > EDID_LENGTH && edid[0x7e] > 0) { 309 if (len > EDID_LENGTH && edid[0x7e] > 0) {
300 l = min(EDID_LENGTH, len - EDID_LENGTH); 310 l = min(EDID_LENGTH, len - EDID_LENGTH);
301 311
302 r = tfp410_ddc_read(adapter, edid + EDID_LENGTH, 312 r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
303 l, EDID_LENGTH); 313 l, EDID_LENGTH);
304 if (r) 314 if (r)
305 goto err; 315 goto err;
@@ -319,21 +329,15 @@ err:
319static bool tfp410_detect(struct omap_dss_device *dssdev) 329static bool tfp410_detect(struct omap_dss_device *dssdev)
320{ 330{
321 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); 331 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
322 struct tfp410_platform_data *pdata = get_pdata(dssdev);
323 struct i2c_adapter *adapter;
324 unsigned char out; 332 unsigned char out;
325 int r; 333 int r;
326 334
327 mutex_lock(&ddata->lock); 335 mutex_lock(&ddata->lock);
328 336
329 if (pdata->i2c_bus_num == 0) 337 if (!ddata->i2c_adapter)
330 goto out;
331
332 adapter = i2c_get_adapter(pdata->i2c_bus_num);
333 if (!adapter)
334 goto out; 338 goto out;
335 339
336 r = tfp410_ddc_read(adapter, &out, 1, 0); 340 r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0);
337 341
338 mutex_unlock(&ddata->lock); 342 mutex_unlock(&ddata->lock);
339 343
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 32f3fcd7f0f0..4b6448b3c31f 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -272,13 +272,16 @@ static const struct omap_video_timings tpo_td043_timings = {
272static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043) 272static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
273{ 273{
274 int nreset_gpio = tpo_td043->nreset_gpio; 274 int nreset_gpio = tpo_td043->nreset_gpio;
275 int r;
275 276
276 if (tpo_td043->powered_on) 277 if (tpo_td043->powered_on)
277 return 0; 278 return 0;
278 279
279 regulator_enable(tpo_td043->vcc_reg); 280 r = regulator_enable(tpo_td043->vcc_reg);
281 if (r != 0)
282 return r;
280 283
281 /* wait for regulator to stabilize */ 284 /* wait for panel to stabilize */
282 msleep(160); 285 msleep(160);
283 286
284 if (gpio_is_valid(nreset_gpio)) 287 if (gpio_is_valid(nreset_gpio))
@@ -470,6 +473,18 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev)
470 gpio_free(nreset_gpio); 473 gpio_free(nreset_gpio);
471} 474}
472 475
476static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
477 struct omap_video_timings *timings)
478{
479 dpi_set_timings(dssdev, timings);
480}
481
482static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
483 struct omap_video_timings *timings)
484{
485 return dpi_check_timings(dssdev, timings);
486}
487
473static struct omap_dss_driver tpo_td043_driver = { 488static struct omap_dss_driver tpo_td043_driver = {
474 .probe = tpo_td043_probe, 489 .probe = tpo_td043_probe,
475 .remove = tpo_td043_remove, 490 .remove = tpo_td043_remove,
@@ -481,6 +496,9 @@ static struct omap_dss_driver tpo_td043_driver = {
481 .set_mirror = tpo_td043_set_hmirror, 496 .set_mirror = tpo_td043_set_hmirror,
482 .get_mirror = tpo_td043_get_hmirror, 497 .get_mirror = tpo_td043_get_hmirror,
483 498
499 .set_timings = tpo_td043_set_timings,
500 .check_timings = tpo_td043_check_timings,
501
484 .driver = { 502 .driver = {
485 .name = "tpo_td043mtea1_panel", 503 .name = "tpo_td043mtea1_panel",
486 .owner = THIS_MODULE, 504 .owner = THIS_MODULE,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 7be7c06a249e..43324e5ed25f 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -68,6 +68,10 @@ config OMAP4_DSS_HDMI
68 HDMI Interface. This adds the High Definition Multimedia Interface. 68 HDMI Interface. This adds the High Definition Multimedia Interface.
69 See http://www.hdmi.org/ for HDMI specification. 69 See http://www.hdmi.org/ for HDMI specification.
70 70
71config OMAP4_DSS_HDMI_AUDIO
72 bool
73 depends on OMAP4_DSS_HDMI
74
71config OMAP2_DSS_SDI 75config OMAP2_DSS_SDI
72 bool "SDI support" 76 bool "SDI support"
73 depends on ARCH_OMAP3 77 depends on ARCH_OMAP3
@@ -90,15 +94,6 @@ config OMAP2_DSS_DSI
90 94
91 See http://www.mipi.org/ for DSI spesifications. 95 See http://www.mipi.org/ for DSI spesifications.
92 96
93config OMAP2_DSS_FAKE_VSYNC
94 bool "Fake VSYNC irq from manual update displays"
95 default n
96 help
97 If this is selected, DSI will generate a fake DISPC VSYNC interrupt
98 when DSI has sent a frame. This is only needed with DSI or RFBI
99 displays using manual mode, and you want VSYNC to, for example,
100 time animation.
101
102config OMAP2_DSS_MIN_FCK_PER_PCK 97config OMAP2_DSS_MIN_FCK_PER_PCK
103 int "Minimum FCK/PCK ratio (for scaling)" 98 int "Minimum FCK/PCK ratio (for scaling)"
104 range 0 32 99 range 0 32
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index b10b3bc1931e..ab22cc224f3e 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -99,6 +99,11 @@ struct mgr_priv_data {
99 99
100 /* If true, a display is enabled using this manager */ 100 /* If true, a display is enabled using this manager */
101 bool enabled; 101 bool enabled;
102
103 bool extra_info_dirty;
104 bool shadow_extra_info_dirty;
105
106 struct omap_video_timings timings;
102}; 107};
103 108
104static struct { 109static struct {
@@ -176,7 +181,7 @@ static bool mgr_manual_update(struct omap_overlay_manager *mgr)
176} 181}
177 182
178static int dss_check_settings_low(struct omap_overlay_manager *mgr, 183static int dss_check_settings_low(struct omap_overlay_manager *mgr,
179 struct omap_dss_device *dssdev, bool applying) 184 bool applying)
180{ 185{
181 struct omap_overlay_info *oi; 186 struct omap_overlay_info *oi;
182 struct omap_overlay_manager_info *mi; 187 struct omap_overlay_manager_info *mi;
@@ -187,6 +192,9 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
187 192
188 mp = get_mgr_priv(mgr); 193 mp = get_mgr_priv(mgr);
189 194
195 if (!mp->enabled)
196 return 0;
197
190 if (applying && mp->user_info_dirty) 198 if (applying && mp->user_info_dirty)
191 mi = &mp->user_info; 199 mi = &mp->user_info;
192 else 200 else
@@ -206,26 +214,24 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
206 ois[ovl->id] = oi; 214 ois[ovl->id] = oi;
207 } 215 }
208 216
209 return dss_mgr_check(mgr, dssdev, mi, ois); 217 return dss_mgr_check(mgr, mi, &mp->timings, ois);
210} 218}
211 219
212/* 220/*
213 * check manager and overlay settings using overlay_info from data->info 221 * check manager and overlay settings using overlay_info from data->info
214 */ 222 */
215static int dss_check_settings(struct omap_overlay_manager *mgr, 223static int dss_check_settings(struct omap_overlay_manager *mgr)
216 struct omap_dss_device *dssdev)
217{ 224{
218 return dss_check_settings_low(mgr, dssdev, false); 225 return dss_check_settings_low(mgr, false);
219} 226}
220 227
221/* 228/*
222 * check manager and overlay settings using overlay_info from ovl->info if 229 * check manager and overlay settings using overlay_info from ovl->info if
223 * dirty and from data->info otherwise 230 * dirty and from data->info otherwise
224 */ 231 */
225static int dss_check_settings_apply(struct omap_overlay_manager *mgr, 232static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
226 struct omap_dss_device *dssdev)
227{ 233{
228 return dss_check_settings_low(mgr, dssdev, true); 234 return dss_check_settings_low(mgr, true);
229} 235}
230 236
231static bool need_isr(void) 237static bool need_isr(void)
@@ -261,6 +267,20 @@ static bool need_isr(void)
261 if (mp->shadow_info_dirty) 267 if (mp->shadow_info_dirty)
262 return true; 268 return true;
263 269
270 /*
271 * NOTE: we don't check extra_info flags for disabled
272 * managers, once the manager is enabled, the extra_info
273 * related manager changes will be taken in by HW.
274 */
275
276 /* to write new values to registers */
277 if (mp->extra_info_dirty)
278 return true;
279
280 /* to set GO bit */
281 if (mp->shadow_extra_info_dirty)
282 return true;
283
264 list_for_each_entry(ovl, &mgr->overlays, list) { 284 list_for_each_entry(ovl, &mgr->overlays, list) {
265 struct ovl_priv_data *op; 285 struct ovl_priv_data *op;
266 286
@@ -305,7 +325,7 @@ static bool need_go(struct omap_overlay_manager *mgr)
305 325
306 mp = get_mgr_priv(mgr); 326 mp = get_mgr_priv(mgr);
307 327
308 if (mp->shadow_info_dirty) 328 if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
309 return true; 329 return true;
310 330
311 list_for_each_entry(ovl, &mgr->overlays, list) { 331 list_for_each_entry(ovl, &mgr->overlays, list) {
@@ -320,20 +340,16 @@ static bool need_go(struct omap_overlay_manager *mgr)
320/* returns true if an extra_info field is currently being updated */ 340/* returns true if an extra_info field is currently being updated */
321static bool extra_info_update_ongoing(void) 341static bool extra_info_update_ongoing(void)
322{ 342{
323 const int num_ovls = omap_dss_get_num_overlays(); 343 const int num_mgrs = dss_feat_get_num_mgrs();
324 struct ovl_priv_data *op;
325 struct omap_overlay *ovl;
326 struct mgr_priv_data *mp;
327 int i; 344 int i;
328 345
329 for (i = 0; i < num_ovls; ++i) { 346 for (i = 0; i < num_mgrs; ++i) {
330 ovl = omap_dss_get_overlay(i); 347 struct omap_overlay_manager *mgr;
331 op = get_ovl_priv(ovl); 348 struct omap_overlay *ovl;
332 349 struct mgr_priv_data *mp;
333 if (!ovl->manager)
334 continue;
335 350
336 mp = get_mgr_priv(ovl->manager); 351 mgr = omap_dss_get_overlay_manager(i);
352 mp = get_mgr_priv(mgr);
337 353
338 if (!mp->enabled) 354 if (!mp->enabled)
339 continue; 355 continue;
@@ -341,8 +357,15 @@ static bool extra_info_update_ongoing(void)
341 if (!mp->updating) 357 if (!mp->updating)
342 continue; 358 continue;
343 359
344 if (op->extra_info_dirty || op->shadow_extra_info_dirty) 360 if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
345 return true; 361 return true;
362
363 list_for_each_entry(ovl, &mgr->overlays, list) {
364 struct ovl_priv_data *op = get_ovl_priv(ovl);
365
366 if (op->extra_info_dirty || op->shadow_extra_info_dirty)
367 return true;
368 }
346 } 369 }
347 370
348 return false; 371 return false;
@@ -525,11 +548,13 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
525 548
526 oi = &op->info; 549 oi = &op->info;
527 550
551 mp = get_mgr_priv(ovl->manager);
552
528 replication = dss_use_replication(ovl->manager->device, oi->color_mode); 553 replication = dss_use_replication(ovl->manager->device, oi->color_mode);
529 554
530 ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC; 555 ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
531 556
532 r = dispc_ovl_setup(ovl->id, oi, ilace, replication); 557 r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings);
533 if (r) { 558 if (r) {
534 /* 559 /*
535 * We can't do much here, as this function can be called from 560 * We can't do much here, as this function can be called from
@@ -543,8 +568,6 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
543 return; 568 return;
544 } 569 }
545 570
546 mp = get_mgr_priv(ovl->manager);
547
548 op->info_dirty = false; 571 op->info_dirty = false;
549 if (mp->updating) 572 if (mp->updating)
550 op->shadow_info_dirty = true; 573 op->shadow_info_dirty = true;
@@ -601,6 +624,22 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
601 } 624 }
602} 625}
603 626
627static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
628{
629 struct mgr_priv_data *mp = get_mgr_priv(mgr);
630
631 DSSDBGF("%d", mgr->id);
632
633 if (!mp->extra_info_dirty)
634 return;
635
636 dispc_mgr_set_timings(mgr->id, &mp->timings);
637
638 mp->extra_info_dirty = false;
639 if (mp->updating)
640 mp->shadow_extra_info_dirty = true;
641}
642
604static void dss_write_regs_common(void) 643static void dss_write_regs_common(void)
605{ 644{
606 const int num_mgrs = omap_dss_get_num_overlay_managers(); 645 const int num_mgrs = omap_dss_get_num_overlay_managers();
@@ -646,7 +685,7 @@ static void dss_write_regs(void)
646 if (!mp->enabled || mgr_manual_update(mgr) || mp->busy) 685 if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
647 continue; 686 continue;
648 687
649 r = dss_check_settings(mgr, mgr->device); 688 r = dss_check_settings(mgr);
650 if (r) { 689 if (r) {
651 DSSERR("cannot write registers for manager %s: " 690 DSSERR("cannot write registers for manager %s: "
652 "illegal configuration\n", mgr->name); 691 "illegal configuration\n", mgr->name);
@@ -654,6 +693,7 @@ static void dss_write_regs(void)
654 } 693 }
655 694
656 dss_mgr_write_regs(mgr); 695 dss_mgr_write_regs(mgr);
696 dss_mgr_write_regs_extra(mgr);
657 } 697 }
658} 698}
659 699
@@ -693,6 +733,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
693 733
694 mp = get_mgr_priv(mgr); 734 mp = get_mgr_priv(mgr);
695 mp->shadow_info_dirty = false; 735 mp->shadow_info_dirty = false;
736 mp->shadow_extra_info_dirty = false;
696 737
697 list_for_each_entry(ovl, &mgr->overlays, list) { 738 list_for_each_entry(ovl, &mgr->overlays, list) {
698 op = get_ovl_priv(ovl); 739 op = get_ovl_priv(ovl);
@@ -711,7 +752,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
711 752
712 WARN_ON(mp->updating); 753 WARN_ON(mp->updating);
713 754
714 r = dss_check_settings(mgr, mgr->device); 755 r = dss_check_settings(mgr);
715 if (r) { 756 if (r) {
716 DSSERR("cannot start manual update: illegal configuration\n"); 757 DSSERR("cannot start manual update: illegal configuration\n");
717 spin_unlock_irqrestore(&data_lock, flags); 758 spin_unlock_irqrestore(&data_lock, flags);
@@ -719,6 +760,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
719 } 760 }
720 761
721 dss_mgr_write_regs(mgr); 762 dss_mgr_write_regs(mgr);
763 dss_mgr_write_regs_extra(mgr);
722 764
723 dss_write_regs_common(); 765 dss_write_regs_common();
724 766
@@ -857,7 +899,7 @@ int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
857 899
858 spin_lock_irqsave(&data_lock, flags); 900 spin_lock_irqsave(&data_lock, flags);
859 901
860 r = dss_check_settings_apply(mgr, mgr->device); 902 r = dss_check_settings_apply(mgr);
861 if (r) { 903 if (r) {
862 spin_unlock_irqrestore(&data_lock, flags); 904 spin_unlock_irqrestore(&data_lock, flags);
863 DSSERR("failed to apply settings: illegal configuration.\n"); 905 DSSERR("failed to apply settings: illegal configuration.\n");
@@ -918,16 +960,13 @@ static void dss_ovl_setup_fifo(struct omap_overlay *ovl,
918 bool use_fifo_merge) 960 bool use_fifo_merge)
919{ 961{
920 struct ovl_priv_data *op = get_ovl_priv(ovl); 962 struct ovl_priv_data *op = get_ovl_priv(ovl);
921 struct omap_dss_device *dssdev;
922 u32 fifo_low, fifo_high; 963 u32 fifo_low, fifo_high;
923 964
924 if (!op->enabled && !op->enabling) 965 if (!op->enabled && !op->enabling)
925 return; 966 return;
926 967
927 dssdev = ovl->manager->device;
928
929 dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high, 968 dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
930 use_fifo_merge); 969 use_fifo_merge, ovl_manual_update(ovl));
931 970
932 dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high); 971 dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
933} 972}
@@ -1050,7 +1089,7 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
1050 1089
1051 mp->enabled = true; 1090 mp->enabled = true;
1052 1091
1053 r = dss_check_settings(mgr, mgr->device); 1092 r = dss_check_settings(mgr);
1054 if (r) { 1093 if (r) {
1055 DSSERR("failed to enable manager %d: check_settings failed\n", 1094 DSSERR("failed to enable manager %d: check_settings failed\n",
1056 mgr->id); 1095 mgr->id);
@@ -1225,6 +1264,35 @@ err:
1225 return r; 1264 return r;
1226} 1265}
1227 1266
1267static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
1268 struct omap_video_timings *timings)
1269{
1270 struct mgr_priv_data *mp = get_mgr_priv(mgr);
1271
1272 mp->timings = *timings;
1273 mp->extra_info_dirty = true;
1274}
1275
1276void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
1277 struct omap_video_timings *timings)
1278{
1279 unsigned long flags;
1280
1281 mutex_lock(&apply_lock);
1282
1283 spin_lock_irqsave(&data_lock, flags);
1284
1285 dss_apply_mgr_timings(mgr, timings);
1286
1287 dss_write_regs();
1288 dss_set_go_bits();
1289
1290 spin_unlock_irqrestore(&data_lock, flags);
1291
1292 wait_pending_extra_info_updates();
1293
1294 mutex_unlock(&apply_lock);
1295}
1228 1296
1229int dss_ovl_set_info(struct omap_overlay *ovl, 1297int dss_ovl_set_info(struct omap_overlay *ovl,
1230 struct omap_overlay_info *info) 1298 struct omap_overlay_info *info)
@@ -1393,7 +1461,7 @@ int dss_ovl_enable(struct omap_overlay *ovl)
1393 1461
1394 op->enabling = true; 1462 op->enabling = true;
1395 1463
1396 r = dss_check_settings(ovl->manager, ovl->manager->device); 1464 r = dss_check_settings(ovl->manager);
1397 if (r) { 1465 if (r) {
1398 DSSERR("failed to enable overlay %d: check_settings failed\n", 1466 DSSERR("failed to enable overlay %d: check_settings failed\n",
1399 ovl->id); 1467 ovl->id);
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index e8a120771ac6..5066eee10ccf 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -43,6 +43,8 @@ static struct {
43 43
44 struct regulator *vdds_dsi_reg; 44 struct regulator *vdds_dsi_reg;
45 struct regulator *vdds_sdi_reg; 45 struct regulator *vdds_sdi_reg;
46
47 const char *default_display_name;
46} core; 48} core;
47 49
48static char *def_disp_name; 50static char *def_disp_name;
@@ -54,9 +56,6 @@ bool dss_debug;
54module_param_named(debug, dss_debug, bool, 0644); 56module_param_named(debug, dss_debug, bool, 0644);
55#endif 57#endif
56 58
57static int omap_dss_register_device(struct omap_dss_device *);
58static void omap_dss_unregister_device(struct omap_dss_device *);
59
60/* REGULATORS */ 59/* REGULATORS */
61 60
62struct regulator *dss_get_vdds_dsi(void) 61struct regulator *dss_get_vdds_dsi(void)
@@ -87,6 +86,51 @@ struct regulator *dss_get_vdds_sdi(void)
87 return reg; 86 return reg;
88} 87}
89 88
89int dss_get_ctx_loss_count(struct device *dev)
90{
91 struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
92 int cnt;
93
94 if (!board_data->get_context_loss_count)
95 return -ENOENT;
96
97 cnt = board_data->get_context_loss_count(dev);
98
99 WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
100
101 return cnt;
102}
103
104int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
105{
106 struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
107
108 if (!board_data->dsi_enable_pads)
109 return -ENOENT;
110
111 return board_data->dsi_enable_pads(dsi_id, lane_mask);
112}
113
114void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
115{
116 struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
117
118 if (!board_data->dsi_enable_pads)
119 return;
120
121 return board_data->dsi_disable_pads(dsi_id, lane_mask);
122}
123
124int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
125{
126 struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
127
128 if (pdata->set_min_bus_tput)
129 return pdata->set_min_bus_tput(dev, tput);
130 else
131 return 0;
132}
133
90#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) 134#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
91static int dss_debug_show(struct seq_file *s, void *unused) 135static int dss_debug_show(struct seq_file *s, void *unused)
92{ 136{
@@ -121,34 +165,6 @@ static int dss_initialize_debugfs(void)
121 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, 165 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
122 &dss_debug_dump_clocks, &dss_debug_fops); 166 &dss_debug_dump_clocks, &dss_debug_fops);
123 167
124#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
125 debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
126 &dispc_dump_irqs, &dss_debug_fops);
127#endif
128
129#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
130 dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
131#endif
132
133 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
134 &dss_dump_regs, &dss_debug_fops);
135 debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
136 &dispc_dump_regs, &dss_debug_fops);
137#ifdef CONFIG_OMAP2_DSS_RFBI
138 debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
139 &rfbi_dump_regs, &dss_debug_fops);
140#endif
141#ifdef CONFIG_OMAP2_DSS_DSI
142 dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
143#endif
144#ifdef CONFIG_OMAP2_DSS_VENC
145 debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
146 &venc_dump_regs, &dss_debug_fops);
147#endif
148#ifdef CONFIG_OMAP4_DSS_HDMI
149 debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
150 &hdmi_dump_regs, &dss_debug_fops);
151#endif
152 return 0; 168 return 0;
153} 169}
154 170
@@ -157,6 +173,19 @@ static void dss_uninitialize_debugfs(void)
157 if (dss_debugfs_dir) 173 if (dss_debugfs_dir)
158 debugfs_remove_recursive(dss_debugfs_dir); 174 debugfs_remove_recursive(dss_debugfs_dir);
159} 175}
176
177int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
178{
179 struct dentry *d;
180
181 d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
182 write, &dss_debug_fops);
183
184 if (IS_ERR(d))
185 return PTR_ERR(d);
186
187 return 0;
188}
160#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */ 189#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
161static inline int dss_initialize_debugfs(void) 190static inline int dss_initialize_debugfs(void)
162{ 191{
@@ -165,14 +194,17 @@ static inline int dss_initialize_debugfs(void)
165static inline void dss_uninitialize_debugfs(void) 194static inline void dss_uninitialize_debugfs(void)
166{ 195{
167} 196}
197int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
198{
199 return 0;
200}
168#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */ 201#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
169 202
170/* PLATFORM DEVICE */ 203/* PLATFORM DEVICE */
171static int omap_dss_probe(struct platform_device *pdev) 204static int __init omap_dss_probe(struct platform_device *pdev)
172{ 205{
173 struct omap_dss_board_info *pdata = pdev->dev.platform_data; 206 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
174 int r; 207 int r;
175 int i;
176 208
177 core.pdev = pdev; 209 core.pdev = pdev;
178 210
@@ -187,28 +219,13 @@ static int omap_dss_probe(struct platform_device *pdev)
187 if (r) 219 if (r)
188 goto err_debugfs; 220 goto err_debugfs;
189 221
190 for (i = 0; i < pdata->num_devices; ++i) { 222 if (def_disp_name)
191 struct omap_dss_device *dssdev = pdata->devices[i]; 223 core.default_display_name = def_disp_name;
192 224 else if (pdata->default_device)
193 r = omap_dss_register_device(dssdev); 225 core.default_display_name = pdata->default_device->name;
194 if (r) {
195 DSSERR("device %d %s register failed %d\n", i,
196 dssdev->name ?: "unnamed", r);
197
198 while (--i >= 0)
199 omap_dss_unregister_device(pdata->devices[i]);
200
201 goto err_register;
202 }
203
204 if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
205 pdata->default_device = dssdev;
206 }
207 226
208 return 0; 227 return 0;
209 228
210err_register:
211 dss_uninitialize_debugfs();
212err_debugfs: 229err_debugfs:
213 230
214 return r; 231 return r;
@@ -216,17 +233,11 @@ err_debugfs:
216 233
217static int omap_dss_remove(struct platform_device *pdev) 234static int omap_dss_remove(struct platform_device *pdev)
218{ 235{
219 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
220 int i;
221
222 dss_uninitialize_debugfs(); 236 dss_uninitialize_debugfs();
223 237
224 dss_uninit_overlays(pdev); 238 dss_uninit_overlays(pdev);
225 dss_uninit_overlay_managers(pdev); 239 dss_uninit_overlay_managers(pdev);
226 240
227 for (i = 0; i < pdata->num_devices; ++i)
228 omap_dss_unregister_device(pdata->devices[i]);
229
230 return 0; 241 return 0;
231} 242}
232 243
@@ -251,7 +262,6 @@ static int omap_dss_resume(struct platform_device *pdev)
251} 262}
252 263
253static struct platform_driver omap_dss_driver = { 264static struct platform_driver omap_dss_driver = {
254 .probe = omap_dss_probe,
255 .remove = omap_dss_remove, 265 .remove = omap_dss_remove,
256 .shutdown = omap_dss_shutdown, 266 .shutdown = omap_dss_shutdown,
257 .suspend = omap_dss_suspend, 267 .suspend = omap_dss_suspend,
@@ -326,7 +336,6 @@ static int dss_driver_probe(struct device *dev)
326 int r; 336 int r;
327 struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver); 337 struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
328 struct omap_dss_device *dssdev = to_dss_device(dev); 338 struct omap_dss_device *dssdev = to_dss_device(dev);
329 struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
330 bool force; 339 bool force;
331 340
332 DSSDBG("driver_probe: dev %s/%s, drv %s\n", 341 DSSDBG("driver_probe: dev %s/%s, drv %s\n",
@@ -335,7 +344,8 @@ static int dss_driver_probe(struct device *dev)
335 344
336 dss_init_device(core.pdev, dssdev); 345 dss_init_device(core.pdev, dssdev);
337 346
338 force = pdata->default_device == dssdev; 347 force = core.default_display_name &&
348 strcmp(core.default_display_name, dssdev->name) == 0;
339 dss_recheck_connections(dssdev, force); 349 dss_recheck_connections(dssdev, force);
340 350
341 r = dssdrv->probe(dssdev); 351 r = dssdrv->probe(dssdev);
@@ -381,6 +391,8 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
381 if (dssdriver->get_recommended_bpp == NULL) 391 if (dssdriver->get_recommended_bpp == NULL)
382 dssdriver->get_recommended_bpp = 392 dssdriver->get_recommended_bpp =
383 omapdss_default_get_recommended_bpp; 393 omapdss_default_get_recommended_bpp;
394 if (dssdriver->get_timings == NULL)
395 dssdriver->get_timings = omapdss_default_get_timings;
384 396
385 return driver_register(&dssdriver->driver); 397 return driver_register(&dssdriver->driver);
386} 398}
@@ -427,27 +439,38 @@ static void omap_dss_dev_release(struct device *dev)
427 reset_device(dev, 0); 439 reset_device(dev, 0);
428} 440}
429 441
430static int omap_dss_register_device(struct omap_dss_device *dssdev) 442int omap_dss_register_device(struct omap_dss_device *dssdev,
443 struct device *parent, int disp_num)
431{ 444{
432 static int dev_num;
433
434 WARN_ON(!dssdev->driver_name); 445 WARN_ON(!dssdev->driver_name);
435 446
436 reset_device(&dssdev->dev, 1); 447 reset_device(&dssdev->dev, 1);
437 dssdev->dev.bus = &dss_bus_type; 448 dssdev->dev.bus = &dss_bus_type;
438 dssdev->dev.parent = &dss_bus; 449 dssdev->dev.parent = parent;
439 dssdev->dev.release = omap_dss_dev_release; 450 dssdev->dev.release = omap_dss_dev_release;
440 dev_set_name(&dssdev->dev, "display%d", dev_num++); 451 dev_set_name(&dssdev->dev, "display%d", disp_num);
441 return device_register(&dssdev->dev); 452 return device_register(&dssdev->dev);
442} 453}
443 454
444static void omap_dss_unregister_device(struct omap_dss_device *dssdev) 455void omap_dss_unregister_device(struct omap_dss_device *dssdev)
445{ 456{
446 device_unregister(&dssdev->dev); 457 device_unregister(&dssdev->dev);
447} 458}
448 459
460static int dss_unregister_dss_dev(struct device *dev, void *data)
461{
462 struct omap_dss_device *dssdev = to_dss_device(dev);
463 omap_dss_unregister_device(dssdev);
464 return 0;
465}
466
467void omap_dss_unregister_child_devices(struct device *parent)
468{
469 device_for_each_child(parent, NULL, dss_unregister_dss_dev);
470}
471
449/* BUS */ 472/* BUS */
450static int omap_dss_bus_register(void) 473static int __init omap_dss_bus_register(void)
451{ 474{
452 int r; 475 int r;
453 476
@@ -469,12 +492,56 @@ static int omap_dss_bus_register(void)
469} 492}
470 493
471/* INIT */ 494/* INIT */
495static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
496#ifdef CONFIG_OMAP2_DSS_DPI
497 dpi_init_platform_driver,
498#endif
499#ifdef CONFIG_OMAP2_DSS_SDI
500 sdi_init_platform_driver,
501#endif
502#ifdef CONFIG_OMAP2_DSS_RFBI
503 rfbi_init_platform_driver,
504#endif
505#ifdef CONFIG_OMAP2_DSS_VENC
506 venc_init_platform_driver,
507#endif
508#ifdef CONFIG_OMAP2_DSS_DSI
509 dsi_init_platform_driver,
510#endif
511#ifdef CONFIG_OMAP4_DSS_HDMI
512 hdmi_init_platform_driver,
513#endif
514};
515
516static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
517#ifdef CONFIG_OMAP2_DSS_DPI
518 dpi_uninit_platform_driver,
519#endif
520#ifdef CONFIG_OMAP2_DSS_SDI
521 sdi_uninit_platform_driver,
522#endif
523#ifdef CONFIG_OMAP2_DSS_RFBI
524 rfbi_uninit_platform_driver,
525#endif
526#ifdef CONFIG_OMAP2_DSS_VENC
527 venc_uninit_platform_driver,
528#endif
529#ifdef CONFIG_OMAP2_DSS_DSI
530 dsi_uninit_platform_driver,
531#endif
532#ifdef CONFIG_OMAP4_DSS_HDMI
533 hdmi_uninit_platform_driver,
534#endif
535};
536
537static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)];
472 538
473static int __init omap_dss_register_drivers(void) 539static int __init omap_dss_register_drivers(void)
474{ 540{
475 int r; 541 int r;
542 int i;
476 543
477 r = platform_driver_register(&omap_dss_driver); 544 r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
478 if (r) 545 if (r)
479 return r; 546 return r;
480 547
@@ -490,40 +557,18 @@ static int __init omap_dss_register_drivers(void)
490 goto err_dispc; 557 goto err_dispc;
491 } 558 }
492 559
493 r = rfbi_init_platform_driver(); 560 /*
494 if (r) { 561 * It's ok if the output-driver register fails. It happens, for example,
495 DSSERR("Failed to initialize rfbi platform driver\n"); 562 * when there is no output-device (e.g. SDI for OMAP4).
496 goto err_rfbi; 563 */
497 } 564 for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
498 565 r = dss_output_drv_reg_funcs[i]();
499 r = venc_init_platform_driver(); 566 if (r == 0)
500 if (r) { 567 dss_output_drv_loaded[i] = true;
501 DSSERR("Failed to initialize venc platform driver\n");
502 goto err_venc;
503 }
504
505 r = dsi_init_platform_driver();
506 if (r) {
507 DSSERR("Failed to initialize DSI platform driver\n");
508 goto err_dsi;
509 }
510
511 r = hdmi_init_platform_driver();
512 if (r) {
513 DSSERR("Failed to initialize hdmi\n");
514 goto err_hdmi;
515 } 568 }
516 569
517 return 0; 570 return 0;
518 571
519err_hdmi:
520 dsi_uninit_platform_driver();
521err_dsi:
522 venc_uninit_platform_driver();
523err_venc:
524 rfbi_uninit_platform_driver();
525err_rfbi:
526 dispc_uninit_platform_driver();
527err_dispc: 572err_dispc:
528 dss_uninit_platform_driver(); 573 dss_uninit_platform_driver();
529err_dss: 574err_dss:
@@ -534,10 +579,13 @@ err_dss:
534 579
535static void __exit omap_dss_unregister_drivers(void) 580static void __exit omap_dss_unregister_drivers(void)
536{ 581{
537 hdmi_uninit_platform_driver(); 582 int i;
538 dsi_uninit_platform_driver(); 583
539 venc_uninit_platform_driver(); 584 for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) {
540 rfbi_uninit_platform_driver(); 585 if (dss_output_drv_loaded[i])
586 dss_output_drv_unreg_funcs[i]();
587 }
588
541 dispc_uninit_platform_driver(); 589 dispc_uninit_platform_driver();
542 dss_uninit_platform_driver(); 590 dss_uninit_platform_driver();
543 591
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index ee30937482e1..4749ac356469 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -131,23 +131,6 @@ static inline u32 dispc_read_reg(const u16 idx)
131 return __raw_readl(dispc.base + idx); 131 return __raw_readl(dispc.base + idx);
132} 132}
133 133
134static int dispc_get_ctx_loss_count(void)
135{
136 struct device *dev = &dispc.pdev->dev;
137 struct omap_display_platform_data *pdata = dev->platform_data;
138 struct omap_dss_board_info *board_data = pdata->board_data;
139 int cnt;
140
141 if (!board_data->get_context_loss_count)
142 return -ENOENT;
143
144 cnt = board_data->get_context_loss_count(dev);
145
146 WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
147
148 return cnt;
149}
150
151#define SR(reg) \ 134#define SR(reg) \
152 dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) 135 dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
153#define RR(reg) \ 136#define RR(reg) \
@@ -251,7 +234,7 @@ static void dispc_save_context(void)
251 if (dss_has_feature(FEAT_CORE_CLK_DIV)) 234 if (dss_has_feature(FEAT_CORE_CLK_DIV))
252 SR(DIVISOR); 235 SR(DIVISOR);
253 236
254 dispc.ctx_loss_cnt = dispc_get_ctx_loss_count(); 237 dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
255 dispc.ctx_valid = true; 238 dispc.ctx_valid = true;
256 239
257 DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt); 240 DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -266,7 +249,7 @@ static void dispc_restore_context(void)
266 if (!dispc.ctx_valid) 249 if (!dispc.ctx_valid)
267 return; 250 return;
268 251
269 ctx = dispc_get_ctx_loss_count(); 252 ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
270 253
271 if (ctx >= 0 && ctx == dispc.ctx_loss_cnt) 254 if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
272 return; 255 return;
@@ -413,14 +396,6 @@ static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
413 return false; 396 return false;
414} 397}
415 398
416static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
417{
418 struct omap_overlay_manager *mgr =
419 omap_dss_get_overlay_manager(channel);
420
421 return mgr ? mgr->device : NULL;
422}
423
424u32 dispc_mgr_get_vsync_irq(enum omap_channel channel) 399u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
425{ 400{
426 switch (channel) { 401 switch (channel) {
@@ -432,6 +407,7 @@ u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
432 return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; 407 return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
433 default: 408 default:
434 BUG(); 409 BUG();
410 return 0;
435 } 411 }
436} 412}
437 413
@@ -446,6 +422,7 @@ u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
446 return 0; 422 return 0;
447 default: 423 default:
448 BUG(); 424 BUG();
425 return 0;
449 } 426 }
450} 427}
451 428
@@ -764,7 +741,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
764 case OMAP_DSS_COLOR_XRGB16_1555: 741 case OMAP_DSS_COLOR_XRGB16_1555:
765 m = 0xf; break; 742 m = 0xf; break;
766 default: 743 default:
767 BUG(); break; 744 BUG(); return;
768 } 745 }
769 } else { 746 } else {
770 switch (color_mode) { 747 switch (color_mode) {
@@ -801,13 +778,25 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
801 case OMAP_DSS_COLOR_XRGB16_1555: 778 case OMAP_DSS_COLOR_XRGB16_1555:
802 m = 0xf; break; 779 m = 0xf; break;
803 default: 780 default:
804 BUG(); break; 781 BUG(); return;
805 } 782 }
806 } 783 }
807 784
808 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); 785 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
809} 786}
810 787
788static void dispc_ovl_configure_burst_type(enum omap_plane plane,
789 enum omap_dss_rotation_type rotation_type)
790{
791 if (dss_has_feature(FEAT_BURST_2D) == 0)
792 return;
793
794 if (rotation_type == OMAP_DSS_ROT_TILER)
795 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
796 else
797 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
798}
799
811void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel) 800void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
812{ 801{
813 int shift; 802 int shift;
@@ -845,6 +834,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
845 break; 834 break;
846 default: 835 default:
847 BUG(); 836 BUG();
837 return;
848 } 838 }
849 839
850 val = FLD_MOD(val, chan, shift, shift); 840 val = FLD_MOD(val, chan, shift, shift);
@@ -872,6 +862,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
872 break; 862 break;
873 default: 863 default:
874 BUG(); 864 BUG();
865 return 0;
875 } 866 }
876 867
877 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); 868 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -983,20 +974,13 @@ static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable)
983 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift); 974 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
984} 975}
985 976
986void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height) 977static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
978 u16 height)
987{ 979{
988 u32 val; 980 u32 val;
989 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
990 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
991 dispc_write_reg(DISPC_SIZE_MGR(channel), val);
992}
993 981
994void dispc_set_digit_size(u16 width, u16 height)
995{
996 u32 val;
997 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
998 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 982 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
999 dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val); 983 dispc_write_reg(DISPC_SIZE_MGR(channel), val);
1000} 984}
1001 985
1002static void dispc_read_plane_fifo_sizes(void) 986static void dispc_read_plane_fifo_sizes(void)
@@ -1063,7 +1047,8 @@ void dispc_enable_fifomerge(bool enable)
1063} 1047}
1064 1048
1065void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, 1049void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
1066 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge) 1050 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
1051 bool manual_update)
1067{ 1052{
1068 /* 1053 /*
1069 * All sizes are in bytes. Both the buffer and burst are made of 1054 * All sizes are in bytes. Both the buffer and burst are made of
@@ -1091,7 +1076,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
1091 * combined fifo size 1076 * combined fifo size
1092 */ 1077 */
1093 1078
1094 if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) { 1079 if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
1095 *fifo_low = ovl_fifo_size - burst_size * 2; 1080 *fifo_low = ovl_fifo_size - burst_size * 2;
1096 *fifo_high = total_fifo_size - burst_size; 1081 *fifo_high = total_fifo_size - burst_size;
1097 } else { 1082 } else {
@@ -1185,6 +1170,94 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane,
1185 dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp); 1170 dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
1186} 1171}
1187 1172
1173static void dispc_ovl_set_accu_uv(enum omap_plane plane,
1174 u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
1175 bool ilace, enum omap_color_mode color_mode, u8 rotation)
1176{
1177 int h_accu2_0, h_accu2_1;
1178 int v_accu2_0, v_accu2_1;
1179 int chroma_hinc, chroma_vinc;
1180 int idx;
1181
1182 struct accu {
1183 s8 h0_m, h0_n;
1184 s8 h1_m, h1_n;
1185 s8 v0_m, v0_n;
1186 s8 v1_m, v1_n;
1187 };
1188
1189 const struct accu *accu_table;
1190 const struct accu *accu_val;
1191
1192 static const struct accu accu_nv12[4] = {
1193 { 0, 1, 0, 1 , -1, 2, 0, 1 },
1194 { 1, 2, -3, 4 , 0, 1, 0, 1 },
1195 { -1, 1, 0, 1 , -1, 2, 0, 1 },
1196 { -1, 2, -1, 2 , -1, 1, 0, 1 },
1197 };
1198
1199 static const struct accu accu_nv12_ilace[4] = {
1200 { 0, 1, 0, 1 , -3, 4, -1, 4 },
1201 { -1, 4, -3, 4 , 0, 1, 0, 1 },
1202 { -1, 1, 0, 1 , -1, 4, -3, 4 },
1203 { -3, 4, -3, 4 , -1, 1, 0, 1 },
1204 };
1205
1206 static const struct accu accu_yuv[4] = {
1207 { 0, 1, 0, 1, 0, 1, 0, 1 },
1208 { 0, 1, 0, 1, 0, 1, 0, 1 },
1209 { -1, 1, 0, 1, 0, 1, 0, 1 },
1210 { 0, 1, 0, 1, -1, 1, 0, 1 },
1211 };
1212
1213 switch (rotation) {
1214 case OMAP_DSS_ROT_0:
1215 idx = 0;
1216 break;
1217 case OMAP_DSS_ROT_90:
1218 idx = 1;
1219 break;
1220 case OMAP_DSS_ROT_180:
1221 idx = 2;
1222 break;
1223 case OMAP_DSS_ROT_270:
1224 idx = 3;
1225 break;
1226 default:
1227 BUG();
1228 return;
1229 }
1230
1231 switch (color_mode) {
1232 case OMAP_DSS_COLOR_NV12:
1233 if (ilace)
1234 accu_table = accu_nv12_ilace;
1235 else
1236 accu_table = accu_nv12;
1237 break;
1238 case OMAP_DSS_COLOR_YUV2:
1239 case OMAP_DSS_COLOR_UYVY:
1240 accu_table = accu_yuv;
1241 break;
1242 default:
1243 BUG();
1244 return;
1245 }
1246
1247 accu_val = &accu_table[idx];
1248
1249 chroma_hinc = 1024 * orig_width / out_width;
1250 chroma_vinc = 1024 * orig_height / out_height;
1251
1252 h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024;
1253 h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024;
1254 v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
1255 v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
1256
1257 dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
1258 dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
1259}
1260
1188static void dispc_ovl_set_scaling_common(enum omap_plane plane, 1261static void dispc_ovl_set_scaling_common(enum omap_plane plane,
1189 u16 orig_width, u16 orig_height, 1262 u16 orig_width, u16 orig_height,
1190 u16 out_width, u16 out_height, 1263 u16 out_width, u16 out_height,
@@ -1258,6 +1331,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1258 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8); 1331 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
1259 return; 1332 return;
1260 } 1333 }
1334
1335 dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
1336 out_height, ilace, color_mode, rotation);
1337
1261 switch (color_mode) { 1338 switch (color_mode) {
1262 case OMAP_DSS_COLOR_NV12: 1339 case OMAP_DSS_COLOR_NV12:
1263 /* UV is subsampled by 2 vertically*/ 1340 /* UV is subsampled by 2 vertically*/
@@ -1280,6 +1357,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1280 break; 1357 break;
1281 default: 1358 default:
1282 BUG(); 1359 BUG();
1360 return;
1283 } 1361 }
1284 1362
1285 if (out_width != orig_width) 1363 if (out_width != orig_width)
@@ -1297,9 +1375,6 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1297 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5); 1375 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
1298 /* set V scaling */ 1376 /* set V scaling */
1299 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6); 1377 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
1300
1301 dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
1302 dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
1303} 1378}
1304 1379
1305static void dispc_ovl_set_scaling(enum omap_plane plane, 1380static void dispc_ovl_set_scaling(enum omap_plane plane,
@@ -1410,6 +1485,7 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode)
1410 return 32; 1485 return 32;
1411 default: 1486 default:
1412 BUG(); 1487 BUG();
1488 return 0;
1413 } 1489 }
1414} 1490}
1415 1491
@@ -1423,6 +1499,7 @@ static s32 pixinc(int pixels, u8 ps)
1423 return 1 - (-pixels + 1) * ps; 1499 return 1 - (-pixels + 1) * ps;
1424 else 1500 else
1425 BUG(); 1501 BUG();
1502 return 0;
1426} 1503}
1427 1504
1428static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, 1505static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
@@ -1431,7 +1508,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
1431 enum omap_color_mode color_mode, bool fieldmode, 1508 enum omap_color_mode color_mode, bool fieldmode,
1432 unsigned int field_offset, 1509 unsigned int field_offset,
1433 unsigned *offset0, unsigned *offset1, 1510 unsigned *offset0, unsigned *offset1,
1434 s32 *row_inc, s32 *pix_inc) 1511 s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
1435{ 1512{
1436 u8 ps; 1513 u8 ps;
1437 1514
@@ -1477,10 +1554,10 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
1477 else 1554 else
1478 *offset0 = 0; 1555 *offset0 = 0;
1479 1556
1480 *row_inc = pixinc(1 + (screen_width - width) + 1557 *row_inc = pixinc(1 +
1481 (fieldmode ? screen_width : 0), 1558 (y_predecim * screen_width - x_predecim * width) +
1482 ps); 1559 (fieldmode ? screen_width : 0), ps);
1483 *pix_inc = pixinc(1, ps); 1560 *pix_inc = pixinc(x_predecim, ps);
1484 break; 1561 break;
1485 1562
1486 case OMAP_DSS_ROT_0 + 4: 1563 case OMAP_DSS_ROT_0 + 4:
@@ -1498,14 +1575,15 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
1498 *offset0 = field_offset * screen_width * ps; 1575 *offset0 = field_offset * screen_width * ps;
1499 else 1576 else
1500 *offset0 = 0; 1577 *offset0 = 0;
1501 *row_inc = pixinc(1 - (screen_width + width) - 1578 *row_inc = pixinc(1 -
1502 (fieldmode ? screen_width : 0), 1579 (y_predecim * screen_width + x_predecim * width) -
1503 ps); 1580 (fieldmode ? screen_width : 0), ps);
1504 *pix_inc = pixinc(1, ps); 1581 *pix_inc = pixinc(x_predecim, ps);
1505 break; 1582 break;
1506 1583
1507 default: 1584 default:
1508 BUG(); 1585 BUG();
1586 return;
1509 } 1587 }
1510} 1588}
1511 1589
@@ -1515,7 +1593,7 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1515 enum omap_color_mode color_mode, bool fieldmode, 1593 enum omap_color_mode color_mode, bool fieldmode,
1516 unsigned int field_offset, 1594 unsigned int field_offset,
1517 unsigned *offset0, unsigned *offset1, 1595 unsigned *offset0, unsigned *offset1,
1518 s32 *row_inc, s32 *pix_inc) 1596 s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
1519{ 1597{
1520 u8 ps; 1598 u8 ps;
1521 u16 fbw, fbh; 1599 u16 fbw, fbh;
@@ -1557,10 +1635,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1557 *offset0 = *offset1 + field_offset * screen_width * ps; 1635 *offset0 = *offset1 + field_offset * screen_width * ps;
1558 else 1636 else
1559 *offset0 = *offset1; 1637 *offset0 = *offset1;
1560 *row_inc = pixinc(1 + (screen_width - fbw) + 1638 *row_inc = pixinc(1 +
1561 (fieldmode ? screen_width : 0), 1639 (y_predecim * screen_width - fbw * x_predecim) +
1562 ps); 1640 (fieldmode ? screen_width : 0), ps);
1563 *pix_inc = pixinc(1, ps); 1641 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1642 color_mode == OMAP_DSS_COLOR_UYVY)
1643 *pix_inc = pixinc(x_predecim, 2 * ps);
1644 else
1645 *pix_inc = pixinc(x_predecim, ps);
1564 break; 1646 break;
1565 case OMAP_DSS_ROT_90: 1647 case OMAP_DSS_ROT_90:
1566 *offset1 = screen_width * (fbh - 1) * ps; 1648 *offset1 = screen_width * (fbh - 1) * ps;
@@ -1568,9 +1650,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1568 *offset0 = *offset1 + field_offset * ps; 1650 *offset0 = *offset1 + field_offset * ps;
1569 else 1651 else
1570 *offset0 = *offset1; 1652 *offset0 = *offset1;
1571 *row_inc = pixinc(screen_width * (fbh - 1) + 1 + 1653 *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) +
1572 (fieldmode ? 1 : 0), ps); 1654 y_predecim + (fieldmode ? 1 : 0), ps);
1573 *pix_inc = pixinc(-screen_width, ps); 1655 *pix_inc = pixinc(-x_predecim * screen_width, ps);
1574 break; 1656 break;
1575 case OMAP_DSS_ROT_180: 1657 case OMAP_DSS_ROT_180:
1576 *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps; 1658 *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
@@ -1579,10 +1661,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1579 else 1661 else
1580 *offset0 = *offset1; 1662 *offset0 = *offset1;
1581 *row_inc = pixinc(-1 - 1663 *row_inc = pixinc(-1 -
1582 (screen_width - fbw) - 1664 (y_predecim * screen_width - fbw * x_predecim) -
1583 (fieldmode ? screen_width : 0), 1665 (fieldmode ? screen_width : 0), ps);
1584 ps); 1666 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1585 *pix_inc = pixinc(-1, ps); 1667 color_mode == OMAP_DSS_COLOR_UYVY)
1668 *pix_inc = pixinc(-x_predecim, 2 * ps);
1669 else
1670 *pix_inc = pixinc(-x_predecim, ps);
1586 break; 1671 break;
1587 case OMAP_DSS_ROT_270: 1672 case OMAP_DSS_ROT_270:
1588 *offset1 = (fbw - 1) * ps; 1673 *offset1 = (fbw - 1) * ps;
@@ -1590,9 +1675,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1590 *offset0 = *offset1 - field_offset * ps; 1675 *offset0 = *offset1 - field_offset * ps;
1591 else 1676 else
1592 *offset0 = *offset1; 1677 *offset0 = *offset1;
1593 *row_inc = pixinc(-screen_width * (fbh - 1) - 1 - 1678 *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) -
1594 (fieldmode ? 1 : 0), ps); 1679 y_predecim - (fieldmode ? 1 : 0), ps);
1595 *pix_inc = pixinc(screen_width, ps); 1680 *pix_inc = pixinc(x_predecim * screen_width, ps);
1596 break; 1681 break;
1597 1682
1598 /* mirroring */ 1683 /* mirroring */
@@ -1602,10 +1687,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1602 *offset0 = *offset1 + field_offset * screen_width * ps; 1687 *offset0 = *offset1 + field_offset * screen_width * ps;
1603 else 1688 else
1604 *offset0 = *offset1; 1689 *offset0 = *offset1;
1605 *row_inc = pixinc(screen_width * 2 - 1 + 1690 *row_inc = pixinc(y_predecim * screen_width * 2 - 1 +
1606 (fieldmode ? screen_width : 0), 1691 (fieldmode ? screen_width : 0),
1607 ps); 1692 ps);
1608 *pix_inc = pixinc(-1, ps); 1693 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1694 color_mode == OMAP_DSS_COLOR_UYVY)
1695 *pix_inc = pixinc(-x_predecim, 2 * ps);
1696 else
1697 *pix_inc = pixinc(-x_predecim, ps);
1609 break; 1698 break;
1610 1699
1611 case OMAP_DSS_ROT_90 + 4: 1700 case OMAP_DSS_ROT_90 + 4:
@@ -1614,10 +1703,10 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1614 *offset0 = *offset1 + field_offset * ps; 1703 *offset0 = *offset1 + field_offset * ps;
1615 else 1704 else
1616 *offset0 = *offset1; 1705 *offset0 = *offset1;
1617 *row_inc = pixinc(-screen_width * (fbh - 1) + 1 + 1706 *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) +
1618 (fieldmode ? 1 : 0), 1707 y_predecim + (fieldmode ? 1 : 0),
1619 ps); 1708 ps);
1620 *pix_inc = pixinc(screen_width, ps); 1709 *pix_inc = pixinc(x_predecim * screen_width, ps);
1621 break; 1710 break;
1622 1711
1623 case OMAP_DSS_ROT_180 + 4: 1712 case OMAP_DSS_ROT_180 + 4:
@@ -1626,10 +1715,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1626 *offset0 = *offset1 - field_offset * screen_width * ps; 1715 *offset0 = *offset1 - field_offset * screen_width * ps;
1627 else 1716 else
1628 *offset0 = *offset1; 1717 *offset0 = *offset1;
1629 *row_inc = pixinc(1 - screen_width * 2 - 1718 *row_inc = pixinc(1 - y_predecim * screen_width * 2 -
1630 (fieldmode ? screen_width : 0), 1719 (fieldmode ? screen_width : 0),
1631 ps); 1720 ps);
1632 *pix_inc = pixinc(1, ps); 1721 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1722 color_mode == OMAP_DSS_COLOR_UYVY)
1723 *pix_inc = pixinc(x_predecim, 2 * ps);
1724 else
1725 *pix_inc = pixinc(x_predecim, ps);
1633 break; 1726 break;
1634 1727
1635 case OMAP_DSS_ROT_270 + 4: 1728 case OMAP_DSS_ROT_270 + 4:
@@ -1638,34 +1731,130 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1638 *offset0 = *offset1 - field_offset * ps; 1731 *offset0 = *offset1 - field_offset * ps;
1639 else 1732 else
1640 *offset0 = *offset1; 1733 *offset0 = *offset1;
1641 *row_inc = pixinc(screen_width * (fbh - 1) - 1 - 1734 *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
1642 (fieldmode ? 1 : 0), 1735 y_predecim - (fieldmode ? 1 : 0),
1643 ps); 1736 ps);
1644 *pix_inc = pixinc(-screen_width, ps); 1737 *pix_inc = pixinc(-x_predecim * screen_width, ps);
1645 break; 1738 break;
1646 1739
1647 default: 1740 default:
1648 BUG(); 1741 BUG();
1742 return;
1743 }
1744}
1745
1746static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
1747 enum omap_color_mode color_mode, bool fieldmode,
1748 unsigned int field_offset, unsigned *offset0, unsigned *offset1,
1749 s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
1750{
1751 u8 ps;
1752
1753 switch (color_mode) {
1754 case OMAP_DSS_COLOR_CLUT1:
1755 case OMAP_DSS_COLOR_CLUT2:
1756 case OMAP_DSS_COLOR_CLUT4:
1757 case OMAP_DSS_COLOR_CLUT8:
1758 BUG();
1759 return;
1760 default:
1761 ps = color_mode_to_bpp(color_mode) / 8;
1762 break;
1649 } 1763 }
1764
1765 DSSDBG("scrw %d, width %d\n", screen_width, width);
1766
1767 /*
1768 * field 0 = even field = bottom field
1769 * field 1 = odd field = top field
1770 */
1771 *offset1 = 0;
1772 if (field_offset)
1773 *offset0 = *offset1 + field_offset * screen_width * ps;
1774 else
1775 *offset0 = *offset1;
1776 *row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
1777 (fieldmode ? screen_width : 0), ps);
1778 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1779 color_mode == OMAP_DSS_COLOR_UYVY)
1780 *pix_inc = pixinc(x_predecim, 2 * ps);
1781 else
1782 *pix_inc = pixinc(x_predecim, ps);
1650} 1783}
1651 1784
1652static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width, 1785/*
1786 * This function is used to avoid synclosts in OMAP3, because of some
1787 * undocumented horizontal position and timing related limitations.
1788 */
1789static int check_horiz_timing_omap3(enum omap_channel channel,
1790 const struct omap_video_timings *t, u16 pos_x,
1791 u16 width, u16 height, u16 out_width, u16 out_height)
1792{
1793 int DS = DIV_ROUND_UP(height, out_height);
1794 unsigned long nonactive, lclk, pclk;
1795 static const u8 limits[3] = { 8, 10, 20 };
1796 u64 val, blank;
1797 int i;
1798
1799 nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
1800 pclk = dispc_mgr_pclk_rate(channel);
1801 if (dispc_mgr_is_lcd(channel))
1802 lclk = dispc_mgr_lclk_rate(channel);
1803 else
1804 lclk = dispc_fclk_rate();
1805
1806 i = 0;
1807 if (out_height < height)
1808 i++;
1809 if (out_width < width)
1810 i++;
1811 blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
1812 DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
1813 if (blank <= limits[i])
1814 return -EINVAL;
1815
1816 /*
1817 * Pixel data should be prepared before visible display point starts.
1818 * So, atleast DS-2 lines must have already been fetched by DISPC
1819 * during nonactive - pos_x period.
1820 */
1821 val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
1822 DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
1823 val, max(0, DS - 2) * width);
1824 if (val < max(0, DS - 2) * width)
1825 return -EINVAL;
1826
1827 /*
1828 * All lines need to be refilled during the nonactive period of which
1829 * only one line can be loaded during the active period. So, atleast
1830 * DS - 1 lines should be loaded during nonactive period.
1831 */
1832 val = div_u64((u64)nonactive * lclk, pclk);
1833 DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n",
1834 val, max(0, DS - 1) * width);
1835 if (val < max(0, DS - 1) * width)
1836 return -EINVAL;
1837
1838 return 0;
1839}
1840
1841static unsigned long calc_core_clk_five_taps(enum omap_channel channel,
1842 const struct omap_video_timings *mgr_timings, u16 width,
1653 u16 height, u16 out_width, u16 out_height, 1843 u16 height, u16 out_width, u16 out_height,
1654 enum omap_color_mode color_mode) 1844 enum omap_color_mode color_mode)
1655{ 1845{
1656 u32 fclk = 0; 1846 u32 core_clk = 0;
1657 u64 tmp, pclk = dispc_mgr_pclk_rate(channel); 1847 u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
1658 1848
1659 if (height <= out_height && width <= out_width) 1849 if (height <= out_height && width <= out_width)
1660 return (unsigned long) pclk; 1850 return (unsigned long) pclk;
1661 1851
1662 if (height > out_height) { 1852 if (height > out_height) {
1663 struct omap_dss_device *dssdev = dispc_mgr_get_device(channel); 1853 unsigned int ppl = mgr_timings->x_res;
1664 unsigned int ppl = dssdev->panel.timings.x_res;
1665 1854
1666 tmp = pclk * height * out_width; 1855 tmp = pclk * height * out_width;
1667 do_div(tmp, 2 * out_height * ppl); 1856 do_div(tmp, 2 * out_height * ppl);
1668 fclk = tmp; 1857 core_clk = tmp;
1669 1858
1670 if (height > 2 * out_height) { 1859 if (height > 2 * out_height) {
1671 if (ppl == out_width) 1860 if (ppl == out_width)
@@ -1673,23 +1862,23 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
1673 1862
1674 tmp = pclk * (height - 2 * out_height) * out_width; 1863 tmp = pclk * (height - 2 * out_height) * out_width;
1675 do_div(tmp, 2 * out_height * (ppl - out_width)); 1864 do_div(tmp, 2 * out_height * (ppl - out_width));
1676 fclk = max(fclk, (u32) tmp); 1865 core_clk = max_t(u32, core_clk, tmp);
1677 } 1866 }
1678 } 1867 }
1679 1868
1680 if (width > out_width) { 1869 if (width > out_width) {
1681 tmp = pclk * width; 1870 tmp = pclk * width;
1682 do_div(tmp, out_width); 1871 do_div(tmp, out_width);
1683 fclk = max(fclk, (u32) tmp); 1872 core_clk = max_t(u32, core_clk, tmp);
1684 1873
1685 if (color_mode == OMAP_DSS_COLOR_RGB24U) 1874 if (color_mode == OMAP_DSS_COLOR_RGB24U)
1686 fclk <<= 1; 1875 core_clk <<= 1;
1687 } 1876 }
1688 1877
1689 return fclk; 1878 return core_clk;
1690} 1879}
1691 1880
1692static unsigned long calc_fclk(enum omap_channel channel, u16 width, 1881static unsigned long calc_core_clk(enum omap_channel channel, u16 width,
1693 u16 height, u16 out_width, u16 out_height) 1882 u16 height, u16 out_width, u16 out_height)
1694{ 1883{
1695 unsigned int hf, vf; 1884 unsigned int hf, vf;
@@ -1730,15 +1919,20 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
1730} 1919}
1731 1920
1732static int dispc_ovl_calc_scaling(enum omap_plane plane, 1921static int dispc_ovl_calc_scaling(enum omap_plane plane,
1733 enum omap_channel channel, u16 width, u16 height, 1922 enum omap_channel channel,
1734 u16 out_width, u16 out_height, 1923 const struct omap_video_timings *mgr_timings,
1735 enum omap_color_mode color_mode, bool *five_taps) 1924 u16 width, u16 height, u16 out_width, u16 out_height,
1925 enum omap_color_mode color_mode, bool *five_taps,
1926 int *x_predecim, int *y_predecim, u16 pos_x)
1736{ 1927{
1737 struct omap_overlay *ovl = omap_dss_get_overlay(plane); 1928 struct omap_overlay *ovl = omap_dss_get_overlay(plane);
1738 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); 1929 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
1739 const int maxsinglelinewidth = 1930 const int maxsinglelinewidth =
1740 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); 1931 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
1741 unsigned long fclk = 0; 1932 const int max_decim_limit = 16;
1933 unsigned long core_clk = 0;
1934 int decim_x, decim_y, error, min_factor;
1935 u16 in_width, in_height, in_width_max = 0;
1742 1936
1743 if (width == out_width && height == out_height) 1937 if (width == out_width && height == out_height)
1744 return 0; 1938 return 0;
@@ -1746,64 +1940,154 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
1746 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) 1940 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
1747 return -EINVAL; 1941 return -EINVAL;
1748 1942
1749 if (out_width < width / maxdownscale || 1943 *x_predecim = max_decim_limit;
1750 out_width > width * 8) 1944 *y_predecim = max_decim_limit;
1945
1946 if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
1947 color_mode == OMAP_DSS_COLOR_CLUT2 ||
1948 color_mode == OMAP_DSS_COLOR_CLUT4 ||
1949 color_mode == OMAP_DSS_COLOR_CLUT8) {
1950 *x_predecim = 1;
1951 *y_predecim = 1;
1952 *five_taps = false;
1953 return 0;
1954 }
1955
1956 decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
1957 decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
1958
1959 min_factor = min(decim_x, decim_y);
1960
1961 if (decim_x > *x_predecim || out_width > width * 8)
1751 return -EINVAL; 1962 return -EINVAL;
1752 1963
1753 if (out_height < height / maxdownscale || 1964 if (decim_y > *y_predecim || out_height > height * 8)
1754 out_height > height * 8)
1755 return -EINVAL; 1965 return -EINVAL;
1756 1966
1757 if (cpu_is_omap24xx()) { 1967 if (cpu_is_omap24xx()) {
1758 if (width > maxsinglelinewidth)
1759 DSSERR("Cannot scale max input width exceeded");
1760 *five_taps = false; 1968 *five_taps = false;
1761 fclk = calc_fclk(channel, width, height, out_width, 1969
1762 out_height); 1970 do {
1971 in_height = DIV_ROUND_UP(height, decim_y);
1972 in_width = DIV_ROUND_UP(width, decim_x);
1973 core_clk = calc_core_clk(channel, in_width, in_height,
1974 out_width, out_height);
1975 error = (in_width > maxsinglelinewidth || !core_clk ||
1976 core_clk > dispc_core_clk_rate());
1977 if (error) {
1978 if (decim_x == decim_y) {
1979 decim_x = min_factor;
1980 decim_y++;
1981 } else {
1982 swap(decim_x, decim_y);
1983 if (decim_x < decim_y)
1984 decim_x++;
1985 }
1986 }
1987 } while (decim_x <= *x_predecim && decim_y <= *y_predecim &&
1988 error);
1989
1990 if (in_width > maxsinglelinewidth) {
1991 DSSERR("Cannot scale max input width exceeded");
1992 return -EINVAL;
1993 }
1763 } else if (cpu_is_omap34xx()) { 1994 } else if (cpu_is_omap34xx()) {
1764 if (width > (maxsinglelinewidth * 2)) { 1995
1996 do {
1997 in_height = DIV_ROUND_UP(height, decim_y);
1998 in_width = DIV_ROUND_UP(width, decim_x);
1999 core_clk = calc_core_clk_five_taps(channel, mgr_timings,
2000 in_width, in_height, out_width, out_height,
2001 color_mode);
2002
2003 error = check_horiz_timing_omap3(channel, mgr_timings,
2004 pos_x, in_width, in_height, out_width,
2005 out_height);
2006
2007 if (in_width > maxsinglelinewidth)
2008 if (in_height > out_height &&
2009 in_height < out_height * 2)
2010 *five_taps = false;
2011 if (!*five_taps)
2012 core_clk = calc_core_clk(channel, in_width,
2013 in_height, out_width, out_height);
2014 error = (error || in_width > maxsinglelinewidth * 2 ||
2015 (in_width > maxsinglelinewidth && *five_taps) ||
2016 !core_clk || core_clk > dispc_core_clk_rate());
2017 if (error) {
2018 if (decim_x == decim_y) {
2019 decim_x = min_factor;
2020 decim_y++;
2021 } else {
2022 swap(decim_x, decim_y);
2023 if (decim_x < decim_y)
2024 decim_x++;
2025 }
2026 }
2027 } while (decim_x <= *x_predecim && decim_y <= *y_predecim
2028 && error);
2029
2030 if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width,
2031 height, out_width, out_height)){
2032 DSSERR("horizontal timing too tight\n");
2033 return -EINVAL;
2034 }
2035
2036 if (in_width > (maxsinglelinewidth * 2)) {
1765 DSSERR("Cannot setup scaling"); 2037 DSSERR("Cannot setup scaling");
1766 DSSERR("width exceeds maximum width possible"); 2038 DSSERR("width exceeds maximum width possible");
1767 return -EINVAL; 2039 return -EINVAL;
1768 } 2040 }
1769 fclk = calc_fclk_five_taps(channel, width, height, out_width, 2041
1770 out_height, color_mode); 2042 if (in_width > maxsinglelinewidth && *five_taps) {
1771 if (width > maxsinglelinewidth) { 2043 DSSERR("cannot setup scaling with five taps");
1772 if (height > out_height && height < out_height * 2) 2044 return -EINVAL;
1773 *five_taps = false;
1774 else {
1775 DSSERR("cannot setup scaling with five taps");
1776 return -EINVAL;
1777 }
1778 } 2045 }
1779 if (!*five_taps)
1780 fclk = calc_fclk(channel, width, height, out_width,
1781 out_height);
1782 } else { 2046 } else {
1783 if (width > maxsinglelinewidth) { 2047 int decim_x_min = decim_x;
2048 in_height = DIV_ROUND_UP(height, decim_y);
2049 in_width_max = dispc_core_clk_rate() /
2050 DIV_ROUND_UP(dispc_mgr_pclk_rate(channel),
2051 out_width);
2052 decim_x = DIV_ROUND_UP(width, in_width_max);
2053
2054 decim_x = decim_x > decim_x_min ? decim_x : decim_x_min;
2055 if (decim_x > *x_predecim)
2056 return -EINVAL;
2057
2058 do {
2059 in_width = DIV_ROUND_UP(width, decim_x);
2060 } while (decim_x <= *x_predecim &&
2061 in_width > maxsinglelinewidth && decim_x++);
2062
2063 if (in_width > maxsinglelinewidth) {
1784 DSSERR("Cannot scale width exceeds max line width"); 2064 DSSERR("Cannot scale width exceeds max line width");
1785 return -EINVAL; 2065 return -EINVAL;
1786 } 2066 }
1787 fclk = calc_fclk(channel, width, height, out_width, 2067
1788 out_height); 2068 core_clk = calc_core_clk(channel, in_width, in_height,
2069 out_width, out_height);
1789 } 2070 }
1790 2071
1791 DSSDBG("required fclk rate = %lu Hz\n", fclk); 2072 DSSDBG("required core clk rate = %lu Hz\n", core_clk);
1792 DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate()); 2073 DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate());
1793 2074
1794 if (!fclk || fclk > dispc_fclk_rate()) { 2075 if (!core_clk || core_clk > dispc_core_clk_rate()) {
1795 DSSERR("failed to set up scaling, " 2076 DSSERR("failed to set up scaling, "
1796 "required fclk rate = %lu Hz, " 2077 "required core clk rate = %lu Hz, "
1797 "current fclk rate = %lu Hz\n", 2078 "current core clk rate = %lu Hz\n",
1798 fclk, dispc_fclk_rate()); 2079 core_clk, dispc_core_clk_rate());
1799 return -EINVAL; 2080 return -EINVAL;
1800 } 2081 }
1801 2082
2083 *x_predecim = decim_x;
2084 *y_predecim = decim_y;
1802 return 0; 2085 return 0;
1803} 2086}
1804 2087
1805int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, 2088int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1806 bool ilace, bool replication) 2089 bool ilace, bool replication,
2090 const struct omap_video_timings *mgr_timings)
1807{ 2091{
1808 struct omap_overlay *ovl = omap_dss_get_overlay(plane); 2092 struct omap_overlay *ovl = omap_dss_get_overlay(plane);
1809 bool five_taps = true; 2093 bool five_taps = true;
@@ -1814,8 +2098,11 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1814 s32 pix_inc; 2098 s32 pix_inc;
1815 u16 frame_height = oi->height; 2099 u16 frame_height = oi->height;
1816 unsigned int field_offset = 0; 2100 unsigned int field_offset = 0;
1817 u16 outw, outh; 2101 u16 in_height = oi->height;
2102 u16 in_width = oi->width;
2103 u16 out_width, out_height;
1818 enum omap_channel channel; 2104 enum omap_channel channel;
2105 int x_predecim = 1, y_predecim = 1;
1819 2106
1820 channel = dispc_ovl_get_channel_out(plane); 2107 channel = dispc_ovl_get_channel_out(plane);
1821 2108
@@ -1829,32 +2116,35 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1829 if (oi->paddr == 0) 2116 if (oi->paddr == 0)
1830 return -EINVAL; 2117 return -EINVAL;
1831 2118
1832 outw = oi->out_width == 0 ? oi->width : oi->out_width; 2119 out_width = oi->out_width == 0 ? oi->width : oi->out_width;
1833 outh = oi->out_height == 0 ? oi->height : oi->out_height; 2120 out_height = oi->out_height == 0 ? oi->height : oi->out_height;
1834 2121
1835 if (ilace && oi->height == outh) 2122 if (ilace && oi->height == out_height)
1836 fieldmode = 1; 2123 fieldmode = 1;
1837 2124
1838 if (ilace) { 2125 if (ilace) {
1839 if (fieldmode) 2126 if (fieldmode)
1840 oi->height /= 2; 2127 in_height /= 2;
1841 oi->pos_y /= 2; 2128 oi->pos_y /= 2;
1842 outh /= 2; 2129 out_height /= 2;
1843 2130
1844 DSSDBG("adjusting for ilace: height %d, pos_y %d, " 2131 DSSDBG("adjusting for ilace: height %d, pos_y %d, "
1845 "out_height %d\n", 2132 "out_height %d\n",
1846 oi->height, oi->pos_y, outh); 2133 in_height, oi->pos_y, out_height);
1847 } 2134 }
1848 2135
1849 if (!dss_feat_color_mode_supported(plane, oi->color_mode)) 2136 if (!dss_feat_color_mode_supported(plane, oi->color_mode))
1850 return -EINVAL; 2137 return -EINVAL;
1851 2138
1852 r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height, 2139 r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width,
1853 outw, outh, oi->color_mode, 2140 in_height, out_width, out_height, oi->color_mode,
1854 &five_taps); 2141 &five_taps, &x_predecim, &y_predecim, oi->pos_x);
1855 if (r) 2142 if (r)
1856 return r; 2143 return r;
1857 2144
2145 in_width = DIV_ROUND_UP(in_width, x_predecim);
2146 in_height = DIV_ROUND_UP(in_height, y_predecim);
2147
1858 if (oi->color_mode == OMAP_DSS_COLOR_YUV2 || 2148 if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
1859 oi->color_mode == OMAP_DSS_COLOR_UYVY || 2149 oi->color_mode == OMAP_DSS_COLOR_UYVY ||
1860 oi->color_mode == OMAP_DSS_COLOR_NV12) 2150 oi->color_mode == OMAP_DSS_COLOR_NV12)
@@ -1868,32 +2158,46 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1868 * so the integer part must be added to the base address of the 2158 * so the integer part must be added to the base address of the
1869 * bottom field. 2159 * bottom field.
1870 */ 2160 */
1871 if (!oi->height || oi->height == outh) 2161 if (!in_height || in_height == out_height)
1872 field_offset = 0; 2162 field_offset = 0;
1873 else 2163 else
1874 field_offset = oi->height / outh / 2; 2164 field_offset = in_height / out_height / 2;
1875 } 2165 }
1876 2166
1877 /* Fields are independent but interleaved in memory. */ 2167 /* Fields are independent but interleaved in memory. */
1878 if (fieldmode) 2168 if (fieldmode)
1879 field_offset = 1; 2169 field_offset = 1;
1880 2170
1881 if (oi->rotation_type == OMAP_DSS_ROT_DMA) 2171 offset0 = 0;
2172 offset1 = 0;
2173 row_inc = 0;
2174 pix_inc = 0;
2175
2176 if (oi->rotation_type == OMAP_DSS_ROT_TILER)
2177 calc_tiler_rotation_offset(oi->screen_width, in_width,
2178 oi->color_mode, fieldmode, field_offset,
2179 &offset0, &offset1, &row_inc, &pix_inc,
2180 x_predecim, y_predecim);
2181 else if (oi->rotation_type == OMAP_DSS_ROT_DMA)
1882 calc_dma_rotation_offset(oi->rotation, oi->mirror, 2182 calc_dma_rotation_offset(oi->rotation, oi->mirror,
1883 oi->screen_width, oi->width, frame_height, 2183 oi->screen_width, in_width, frame_height,
1884 oi->color_mode, fieldmode, field_offset, 2184 oi->color_mode, fieldmode, field_offset,
1885 &offset0, &offset1, &row_inc, &pix_inc); 2185 &offset0, &offset1, &row_inc, &pix_inc,
2186 x_predecim, y_predecim);
1886 else 2187 else
1887 calc_vrfb_rotation_offset(oi->rotation, oi->mirror, 2188 calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
1888 oi->screen_width, oi->width, frame_height, 2189 oi->screen_width, in_width, frame_height,
1889 oi->color_mode, fieldmode, field_offset, 2190 oi->color_mode, fieldmode, field_offset,
1890 &offset0, &offset1, &row_inc, &pix_inc); 2191 &offset0, &offset1, &row_inc, &pix_inc,
2192 x_predecim, y_predecim);
1891 2193
1892 DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n", 2194 DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
1893 offset0, offset1, row_inc, pix_inc); 2195 offset0, offset1, row_inc, pix_inc);
1894 2196
1895 dispc_ovl_set_color_mode(plane, oi->color_mode); 2197 dispc_ovl_set_color_mode(plane, oi->color_mode);
1896 2198
2199 dispc_ovl_configure_burst_type(plane, oi->rotation_type);
2200
1897 dispc_ovl_set_ba0(plane, oi->paddr + offset0); 2201 dispc_ovl_set_ba0(plane, oi->paddr + offset0);
1898 dispc_ovl_set_ba1(plane, oi->paddr + offset1); 2202 dispc_ovl_set_ba1(plane, oi->paddr + offset1);
1899 2203
@@ -1906,19 +2210,18 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1906 dispc_ovl_set_row_inc(plane, row_inc); 2210 dispc_ovl_set_row_inc(plane, row_inc);
1907 dispc_ovl_set_pix_inc(plane, pix_inc); 2211 dispc_ovl_set_pix_inc(plane, pix_inc);
1908 2212
1909 DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width, 2213 DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width,
1910 oi->height, outw, outh); 2214 in_height, out_width, out_height);
1911 2215
1912 dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y); 2216 dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
1913 2217
1914 dispc_ovl_set_pic_size(plane, oi->width, oi->height); 2218 dispc_ovl_set_pic_size(plane, in_width, in_height);
1915 2219
1916 if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) { 2220 if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
1917 dispc_ovl_set_scaling(plane, oi->width, oi->height, 2221 dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
1918 outw, outh, 2222 out_height, ilace, five_taps, fieldmode,
1919 ilace, five_taps, fieldmode,
1920 oi->color_mode, oi->rotation); 2223 oi->color_mode, oi->rotation);
1921 dispc_ovl_set_vid_size(plane, outw, outh); 2224 dispc_ovl_set_vid_size(plane, out_width, out_height);
1922 dispc_ovl_set_vid_color_conv(plane, cconv); 2225 dispc_ovl_set_vid_color_conv(plane, cconv);
1923 } 2226 }
1924 2227
@@ -2087,8 +2390,10 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
2087 return !!REG_GET(DISPC_CONTROL, 1, 1); 2390 return !!REG_GET(DISPC_CONTROL, 1, 1);
2088 else if (channel == OMAP_DSS_CHANNEL_LCD2) 2391 else if (channel == OMAP_DSS_CHANNEL_LCD2)
2089 return !!REG_GET(DISPC_CONTROL2, 0, 0); 2392 return !!REG_GET(DISPC_CONTROL2, 0, 0);
2090 else 2393 else {
2091 BUG(); 2394 BUG();
2395 return false;
2396 }
2092} 2397}
2093 2398
2094void dispc_mgr_enable(enum omap_channel channel, bool enable) 2399void dispc_mgr_enable(enum omap_channel channel, bool enable)
@@ -2285,6 +2590,12 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
2285 REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11); 2590 REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
2286} 2591}
2287 2592
2593static bool _dispc_mgr_size_ok(u16 width, u16 height)
2594{
2595 return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
2596 height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
2597}
2598
2288static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, 2599static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
2289 int vsw, int vfp, int vbp) 2600 int vsw, int vfp, int vbp)
2290{ 2601{
@@ -2309,11 +2620,20 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
2309 return true; 2620 return true;
2310} 2621}
2311 2622
2312bool dispc_lcd_timings_ok(struct omap_video_timings *timings) 2623bool dispc_mgr_timings_ok(enum omap_channel channel,
2624 const struct omap_video_timings *timings)
2313{ 2625{
2314 return _dispc_lcd_timings_ok(timings->hsw, timings->hfp, 2626 bool timings_ok;
2315 timings->hbp, timings->vsw, 2627
2316 timings->vfp, timings->vbp); 2628 timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
2629
2630 if (dispc_mgr_is_lcd(channel))
2631 timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw,
2632 timings->hfp, timings->hbp,
2633 timings->vsw, timings->vfp,
2634 timings->vbp);
2635
2636 return timings_ok;
2317} 2637}
2318 2638
2319static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, 2639static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
@@ -2340,37 +2660,45 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
2340} 2660}
2341 2661
2342/* change name to mode? */ 2662/* change name to mode? */
2343void dispc_mgr_set_lcd_timings(enum omap_channel channel, 2663void dispc_mgr_set_timings(enum omap_channel channel,
2344 struct omap_video_timings *timings) 2664 struct omap_video_timings *timings)
2345{ 2665{
2346 unsigned xtot, ytot; 2666 unsigned xtot, ytot;
2347 unsigned long ht, vt; 2667 unsigned long ht, vt;
2668 struct omap_video_timings t = *timings;
2669
2670 DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
2348 2671
2349 if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp, 2672 if (!dispc_mgr_timings_ok(channel, &t)) {
2350 timings->hbp, timings->vsw,
2351 timings->vfp, timings->vbp))
2352 BUG(); 2673 BUG();
2674 return;
2675 }
2676
2677 if (dispc_mgr_is_lcd(channel)) {
2678 _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
2679 t.vfp, t.vbp);
2680
2681 xtot = t.x_res + t.hfp + t.hsw + t.hbp;
2682 ytot = t.y_res + t.vfp + t.vsw + t.vbp;
2353 2683
2354 _dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp, 2684 ht = (timings->pixel_clock * 1000) / xtot;
2355 timings->hbp, timings->vsw, timings->vfp, 2685 vt = (timings->pixel_clock * 1000) / xtot / ytot;
2356 timings->vbp);
2357 2686
2358 dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res); 2687 DSSDBG("pck %u\n", timings->pixel_clock);
2688 DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
2689 t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
2359 2690
2360 xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp; 2691 DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
2361 ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp; 2692 } else {
2693 enum dss_hdmi_venc_clk_source_select source;
2362 2694
2363 ht = (timings->pixel_clock * 1000) / xtot; 2695 source = dss_get_hdmi_venc_clk_source();
2364 vt = (timings->pixel_clock * 1000) / xtot / ytot;
2365 2696
2366 DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res, 2697 if (source == DSS_VENC_TV_CLK)
2367 timings->y_res); 2698 t.y_res /= 2;
2368 DSSDBG("pck %u\n", timings->pixel_clock); 2699 }
2369 DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
2370 timings->hsw, timings->hfp, timings->hbp,
2371 timings->vsw, timings->vfp, timings->vbp);
2372 2700
2373 DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); 2701 dispc_mgr_set_size(channel, t.x_res, t.y_res);
2374} 2702}
2375 2703
2376static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div, 2704static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
@@ -2411,6 +2739,7 @@ unsigned long dispc_fclk_rate(void)
2411 break; 2739 break;
2412 default: 2740 default:
2413 BUG(); 2741 BUG();
2742 return 0;
2414 } 2743 }
2415 2744
2416 return r; 2745 return r;
@@ -2441,6 +2770,7 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
2441 break; 2770 break;
2442 default: 2771 default:
2443 BUG(); 2772 BUG();
2773 return 0;
2444 } 2774 }
2445 2775
2446 return r / lcd; 2776 return r / lcd;
@@ -2462,20 +2792,35 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
2462 2792
2463 return r / pcd; 2793 return r / pcd;
2464 } else { 2794 } else {
2465 struct omap_dss_device *dssdev = 2795 enum dss_hdmi_venc_clk_source_select source;
2466 dispc_mgr_get_device(channel);
2467 2796
2468 switch (dssdev->type) { 2797 source = dss_get_hdmi_venc_clk_source();
2469 case OMAP_DISPLAY_TYPE_VENC: 2798
2799 switch (source) {
2800 case DSS_VENC_TV_CLK:
2470 return venc_get_pixel_clock(); 2801 return venc_get_pixel_clock();
2471 case OMAP_DISPLAY_TYPE_HDMI: 2802 case DSS_HDMI_M_PCLK:
2472 return hdmi_get_pixel_clock(); 2803 return hdmi_get_pixel_clock();
2473 default: 2804 default:
2474 BUG(); 2805 BUG();
2806 return 0;
2475 } 2807 }
2476 } 2808 }
2477} 2809}
2478 2810
2811unsigned long dispc_core_clk_rate(void)
2812{
2813 int lcd;
2814 unsigned long fclk = dispc_fclk_rate();
2815
2816 if (dss_has_feature(FEAT_CORE_CLK_DIV))
2817 lcd = REG_GET(DISPC_DIVISOR, 23, 16);
2818 else
2819 lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
2820
2821 return fclk / lcd;
2822}
2823
2479void dispc_dump_clocks(struct seq_file *s) 2824void dispc_dump_clocks(struct seq_file *s)
2480{ 2825{
2481 int lcd, pcd; 2826 int lcd, pcd;
@@ -2588,7 +2933,7 @@ void dispc_dump_irqs(struct seq_file *s)
2588} 2933}
2589#endif 2934#endif
2590 2935
2591void dispc_dump_regs(struct seq_file *s) 2936static void dispc_dump_regs(struct seq_file *s)
2592{ 2937{
2593 int i, j; 2938 int i, j;
2594 const char *mgr_names[] = { 2939 const char *mgr_names[] = {
@@ -3247,27 +3592,6 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
3247 return 0; 3592 return 0;
3248} 3593}
3249 3594
3250#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3251void dispc_fake_vsync_irq(void)
3252{
3253 u32 irqstatus = DISPC_IRQ_VSYNC;
3254 int i;
3255
3256 WARN_ON(!in_interrupt());
3257
3258 for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
3259 struct omap_dispc_isr_data *isr_data;
3260 isr_data = &dispc.registered_isr[i];
3261
3262 if (!isr_data->isr)
3263 continue;
3264
3265 if (isr_data->mask & irqstatus)
3266 isr_data->isr(isr_data->arg, irqstatus);
3267 }
3268}
3269#endif
3270
3271static void _omap_dispc_initialize_irq(void) 3595static void _omap_dispc_initialize_irq(void)
3272{ 3596{
3273 unsigned long flags; 3597 unsigned long flags;
@@ -3330,7 +3654,7 @@ static void _omap_dispc_initial_config(void)
3330} 3654}
3331 3655
3332/* DISPC HW IP initialisation */ 3656/* DISPC HW IP initialisation */
3333static int omap_dispchw_probe(struct platform_device *pdev) 3657static int __init omap_dispchw_probe(struct platform_device *pdev)
3334{ 3658{
3335 u32 rev; 3659 u32 rev;
3336 int r = 0; 3660 int r = 0;
@@ -3399,6 +3723,11 @@ static int omap_dispchw_probe(struct platform_device *pdev)
3399 3723
3400 dispc_runtime_put(); 3724 dispc_runtime_put();
3401 3725
3726 dss_debugfs_create_file("dispc", dispc_dump_regs);
3727
3728#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
3729 dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
3730#endif
3402 return 0; 3731 return 0;
3403 3732
3404err_runtime_get: 3733err_runtime_get:
@@ -3407,7 +3736,7 @@ err_runtime_get:
3407 return r; 3736 return r;
3408} 3737}
3409 3738
3410static int omap_dispchw_remove(struct platform_device *pdev) 3739static int __exit omap_dispchw_remove(struct platform_device *pdev)
3411{ 3740{
3412 pm_runtime_disable(&pdev->dev); 3741 pm_runtime_disable(&pdev->dev);
3413 3742
@@ -3419,19 +3748,12 @@ static int omap_dispchw_remove(struct platform_device *pdev)
3419static int dispc_runtime_suspend(struct device *dev) 3748static int dispc_runtime_suspend(struct device *dev)
3420{ 3749{
3421 dispc_save_context(); 3750 dispc_save_context();
3422 dss_runtime_put();
3423 3751
3424 return 0; 3752 return 0;
3425} 3753}
3426 3754
3427static int dispc_runtime_resume(struct device *dev) 3755static int dispc_runtime_resume(struct device *dev)
3428{ 3756{
3429 int r;
3430
3431 r = dss_runtime_get();
3432 if (r < 0)
3433 return r;
3434
3435 dispc_restore_context(); 3757 dispc_restore_context();
3436 3758
3437 return 0; 3759 return 0;
@@ -3443,8 +3765,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
3443}; 3765};
3444 3766
3445static struct platform_driver omap_dispchw_driver = { 3767static struct platform_driver omap_dispchw_driver = {
3446 .probe = omap_dispchw_probe, 3768 .remove = __exit_p(omap_dispchw_remove),
3447 .remove = omap_dispchw_remove,
3448 .driver = { 3769 .driver = {
3449 .name = "omapdss_dispc", 3770 .name = "omapdss_dispc",
3450 .owner = THIS_MODULE, 3771 .owner = THIS_MODULE,
@@ -3452,12 +3773,12 @@ static struct platform_driver omap_dispchw_driver = {
3452 }, 3773 },
3453}; 3774};
3454 3775
3455int dispc_init_platform_driver(void) 3776int __init dispc_init_platform_driver(void)
3456{ 3777{
3457 return platform_driver_register(&omap_dispchw_driver); 3778 return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe);
3458} 3779}
3459 3780
3460void dispc_uninit_platform_driver(void) 3781void __exit dispc_uninit_platform_driver(void)
3461{ 3782{
3462 return platform_driver_unregister(&omap_dispchw_driver); 3783 platform_driver_unregister(&omap_dispchw_driver);
3463} 3784}
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 5836bd1650f9..f278080e1063 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -120,6 +120,7 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
120 return 0x03AC; 120 return 0x03AC;
121 default: 121 default:
122 BUG(); 122 BUG();
123 return 0;
123 } 124 }
124} 125}
125 126
@@ -134,6 +135,7 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
134 return 0x03B0; 135 return 0x03B0;
135 default: 136 default:
136 BUG(); 137 BUG();
138 return 0;
137 } 139 }
138} 140}
139 141
@@ -144,10 +146,12 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel)
144 return 0x0064; 146 return 0x0064;
145 case OMAP_DSS_CHANNEL_DIGIT: 147 case OMAP_DSS_CHANNEL_DIGIT:
146 BUG(); 148 BUG();
149 return 0;
147 case OMAP_DSS_CHANNEL_LCD2: 150 case OMAP_DSS_CHANNEL_LCD2:
148 return 0x0400; 151 return 0x0400;
149 default: 152 default:
150 BUG(); 153 BUG();
154 return 0;
151 } 155 }
152} 156}
153 157
@@ -158,10 +162,12 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel)
158 return 0x0068; 162 return 0x0068;
159 case OMAP_DSS_CHANNEL_DIGIT: 163 case OMAP_DSS_CHANNEL_DIGIT:
160 BUG(); 164 BUG();
165 return 0;
161 case OMAP_DSS_CHANNEL_LCD2: 166 case OMAP_DSS_CHANNEL_LCD2:
162 return 0x0404; 167 return 0x0404;
163 default: 168 default:
164 BUG(); 169 BUG();
170 return 0;
165 } 171 }
166} 172}
167 173
@@ -172,10 +178,12 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
172 return 0x006C; 178 return 0x006C;
173 case OMAP_DSS_CHANNEL_DIGIT: 179 case OMAP_DSS_CHANNEL_DIGIT:
174 BUG(); 180 BUG();
181 return 0;
175 case OMAP_DSS_CHANNEL_LCD2: 182 case OMAP_DSS_CHANNEL_LCD2:
176 return 0x0408; 183 return 0x0408;
177 default: 184 default:
178 BUG(); 185 BUG();
186 return 0;
179 } 187 }
180} 188}
181 189
@@ -186,10 +194,12 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel)
186 return 0x0070; 194 return 0x0070;
187 case OMAP_DSS_CHANNEL_DIGIT: 195 case OMAP_DSS_CHANNEL_DIGIT:
188 BUG(); 196 BUG();
197 return 0;
189 case OMAP_DSS_CHANNEL_LCD2: 198 case OMAP_DSS_CHANNEL_LCD2:
190 return 0x040C; 199 return 0x040C;
191 default: 200 default:
192 BUG(); 201 BUG();
202 return 0;
193 } 203 }
194} 204}
195 205
@@ -205,6 +215,7 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
205 return 0x03CC; 215 return 0x03CC;
206 default: 216 default:
207 BUG(); 217 BUG();
218 return 0;
208 } 219 }
209} 220}
210 221
@@ -215,10 +226,12 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
215 return 0x01D4; 226 return 0x01D4;
216 case OMAP_DSS_CHANNEL_DIGIT: 227 case OMAP_DSS_CHANNEL_DIGIT:
217 BUG(); 228 BUG();
229 return 0;
218 case OMAP_DSS_CHANNEL_LCD2: 230 case OMAP_DSS_CHANNEL_LCD2:
219 return 0x03C0; 231 return 0x03C0;
220 default: 232 default:
221 BUG(); 233 BUG();
234 return 0;
222 } 235 }
223} 236}
224 237
@@ -229,10 +242,12 @@ static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
229 return 0x01D8; 242 return 0x01D8;
230 case OMAP_DSS_CHANNEL_DIGIT: 243 case OMAP_DSS_CHANNEL_DIGIT:
231 BUG(); 244 BUG();
245 return 0;
232 case OMAP_DSS_CHANNEL_LCD2: 246 case OMAP_DSS_CHANNEL_LCD2:
233 return 0x03C4; 247 return 0x03C4;
234 default: 248 default:
235 BUG(); 249 BUG();
250 return 0;
236 } 251 }
237} 252}
238 253
@@ -243,10 +258,12 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
243 return 0x01DC; 258 return 0x01DC;
244 case OMAP_DSS_CHANNEL_DIGIT: 259 case OMAP_DSS_CHANNEL_DIGIT:
245 BUG(); 260 BUG();
261 return 0;
246 case OMAP_DSS_CHANNEL_LCD2: 262 case OMAP_DSS_CHANNEL_LCD2:
247 return 0x03C8; 263 return 0x03C8;
248 default: 264 default:
249 BUG(); 265 BUG();
266 return 0;
250 } 267 }
251} 268}
252 269
@@ -257,10 +274,12 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
257 return 0x0220; 274 return 0x0220;
258 case OMAP_DSS_CHANNEL_DIGIT: 275 case OMAP_DSS_CHANNEL_DIGIT:
259 BUG(); 276 BUG();
277 return 0;
260 case OMAP_DSS_CHANNEL_LCD2: 278 case OMAP_DSS_CHANNEL_LCD2:
261 return 0x03BC; 279 return 0x03BC;
262 default: 280 default:
263 BUG(); 281 BUG();
282 return 0;
264 } 283 }
265} 284}
266 285
@@ -271,10 +290,12 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
271 return 0x0224; 290 return 0x0224;
272 case OMAP_DSS_CHANNEL_DIGIT: 291 case OMAP_DSS_CHANNEL_DIGIT:
273 BUG(); 292 BUG();
293 return 0;
274 case OMAP_DSS_CHANNEL_LCD2: 294 case OMAP_DSS_CHANNEL_LCD2:
275 return 0x03B8; 295 return 0x03B8;
276 default: 296 default:
277 BUG(); 297 BUG();
298 return 0;
278 } 299 }
279} 300}
280 301
@@ -285,10 +306,12 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
285 return 0x0228; 306 return 0x0228;
286 case OMAP_DSS_CHANNEL_DIGIT: 307 case OMAP_DSS_CHANNEL_DIGIT:
287 BUG(); 308 BUG();
309 return 0;
288 case OMAP_DSS_CHANNEL_LCD2: 310 case OMAP_DSS_CHANNEL_LCD2:
289 return 0x03B4; 311 return 0x03B4;
290 default: 312 default:
291 BUG(); 313 BUG();
314 return 0;
292 } 315 }
293} 316}
294 317
@@ -306,6 +329,7 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
306 return 0x0300; 329 return 0x0300;
307 default: 330 default:
308 BUG(); 331 BUG();
332 return 0;
309 } 333 }
310} 334}
311 335
@@ -321,6 +345,7 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
321 return 0x0008; 345 return 0x0008;
322 default: 346 default:
323 BUG(); 347 BUG();
348 return 0;
324 } 349 }
325} 350}
326 351
@@ -335,6 +360,7 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
335 return 0x000C; 360 return 0x000C;
336 default: 361 default:
337 BUG(); 362 BUG();
363 return 0;
338 } 364 }
339} 365}
340 366
@@ -343,6 +369,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
343 switch (plane) { 369 switch (plane) {
344 case OMAP_DSS_GFX: 370 case OMAP_DSS_GFX:
345 BUG(); 371 BUG();
372 return 0;
346 case OMAP_DSS_VIDEO1: 373 case OMAP_DSS_VIDEO1:
347 return 0x0544; 374 return 0x0544;
348 case OMAP_DSS_VIDEO2: 375 case OMAP_DSS_VIDEO2:
@@ -351,6 +378,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
351 return 0x0310; 378 return 0x0310;
352 default: 379 default:
353 BUG(); 380 BUG();
381 return 0;
354 } 382 }
355} 383}
356 384
@@ -359,6 +387,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
359 switch (plane) { 387 switch (plane) {
360 case OMAP_DSS_GFX: 388 case OMAP_DSS_GFX:
361 BUG(); 389 BUG();
390 return 0;
362 case OMAP_DSS_VIDEO1: 391 case OMAP_DSS_VIDEO1:
363 return 0x0548; 392 return 0x0548;
364 case OMAP_DSS_VIDEO2: 393 case OMAP_DSS_VIDEO2:
@@ -367,6 +396,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
367 return 0x0314; 396 return 0x0314;
368 default: 397 default:
369 BUG(); 398 BUG();
399 return 0;
370 } 400 }
371} 401}
372 402
@@ -381,6 +411,7 @@ static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
381 return 0x009C; 411 return 0x009C;
382 default: 412 default:
383 BUG(); 413 BUG();
414 return 0;
384 } 415 }
385} 416}
386 417
@@ -395,6 +426,7 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
395 return 0x00A8; 426 return 0x00A8;
396 default: 427 default:
397 BUG(); 428 BUG();
429 return 0;
398 } 430 }
399} 431}
400 432
@@ -410,6 +442,7 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
410 return 0x0070; 442 return 0x0070;
411 default: 443 default:
412 BUG(); 444 BUG();
445 return 0;
413 } 446 }
414} 447}
415 448
@@ -418,6 +451,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
418 switch (plane) { 451 switch (plane) {
419 case OMAP_DSS_GFX: 452 case OMAP_DSS_GFX:
420 BUG(); 453 BUG();
454 return 0;
421 case OMAP_DSS_VIDEO1: 455 case OMAP_DSS_VIDEO1:
422 return 0x0568; 456 return 0x0568;
423 case OMAP_DSS_VIDEO2: 457 case OMAP_DSS_VIDEO2:
@@ -426,6 +460,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
426 return 0x032C; 460 return 0x032C;
427 default: 461 default:
428 BUG(); 462 BUG();
463 return 0;
429 } 464 }
430} 465}
431 466
@@ -441,6 +476,7 @@ static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
441 return 0x008C; 476 return 0x008C;
442 default: 477 default:
443 BUG(); 478 BUG();
479 return 0;
444 } 480 }
445} 481}
446 482
@@ -456,6 +492,7 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
456 return 0x0088; 492 return 0x0088;
457 default: 493 default:
458 BUG(); 494 BUG();
495 return 0;
459 } 496 }
460} 497}
461 498
@@ -471,6 +508,7 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
471 return 0x00A4; 508 return 0x00A4;
472 default: 509 default:
473 BUG(); 510 BUG();
511 return 0;
474 } 512 }
475} 513}
476 514
@@ -486,6 +524,7 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
486 return 0x0098; 524 return 0x0098;
487 default: 525 default:
488 BUG(); 526 BUG();
527 return 0;
489 } 528 }
490} 529}
491 530
@@ -498,8 +537,10 @@ static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
498 case OMAP_DSS_VIDEO2: 537 case OMAP_DSS_VIDEO2:
499 case OMAP_DSS_VIDEO3: 538 case OMAP_DSS_VIDEO3:
500 BUG(); 539 BUG();
540 return 0;
501 default: 541 default:
502 BUG(); 542 BUG();
543 return 0;
503 } 544 }
504} 545}
505 546
@@ -512,8 +553,10 @@ static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
512 case OMAP_DSS_VIDEO2: 553 case OMAP_DSS_VIDEO2:
513 case OMAP_DSS_VIDEO3: 554 case OMAP_DSS_VIDEO3:
514 BUG(); 555 BUG();
556 return 0;
515 default: 557 default:
516 BUG(); 558 BUG();
559 return 0;
517 } 560 }
518} 561}
519 562
@@ -522,6 +565,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
522 switch (plane) { 565 switch (plane) {
523 case OMAP_DSS_GFX: 566 case OMAP_DSS_GFX:
524 BUG(); 567 BUG();
568 return 0;
525 case OMAP_DSS_VIDEO1: 569 case OMAP_DSS_VIDEO1:
526 case OMAP_DSS_VIDEO2: 570 case OMAP_DSS_VIDEO2:
527 return 0x0024; 571 return 0x0024;
@@ -529,6 +573,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
529 return 0x0090; 573 return 0x0090;
530 default: 574 default:
531 BUG(); 575 BUG();
576 return 0;
532 } 577 }
533} 578}
534 579
@@ -537,6 +582,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
537 switch (plane) { 582 switch (plane) {
538 case OMAP_DSS_GFX: 583 case OMAP_DSS_GFX:
539 BUG(); 584 BUG();
585 return 0;
540 case OMAP_DSS_VIDEO1: 586 case OMAP_DSS_VIDEO1:
541 return 0x0580; 587 return 0x0580;
542 case OMAP_DSS_VIDEO2: 588 case OMAP_DSS_VIDEO2:
@@ -545,6 +591,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
545 return 0x0424; 591 return 0x0424;
546 default: 592 default:
547 BUG(); 593 BUG();
594 return 0;
548 } 595 }
549} 596}
550 597
@@ -553,6 +600,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
553 switch (plane) { 600 switch (plane) {
554 case OMAP_DSS_GFX: 601 case OMAP_DSS_GFX:
555 BUG(); 602 BUG();
603 return 0;
556 case OMAP_DSS_VIDEO1: 604 case OMAP_DSS_VIDEO1:
557 case OMAP_DSS_VIDEO2: 605 case OMAP_DSS_VIDEO2:
558 return 0x0028; 606 return 0x0028;
@@ -560,6 +608,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
560 return 0x0094; 608 return 0x0094;
561 default: 609 default:
562 BUG(); 610 BUG();
611 return 0;
563 } 612 }
564} 613}
565 614
@@ -569,6 +618,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
569 switch (plane) { 618 switch (plane) {
570 case OMAP_DSS_GFX: 619 case OMAP_DSS_GFX:
571 BUG(); 620 BUG();
621 return 0;
572 case OMAP_DSS_VIDEO1: 622 case OMAP_DSS_VIDEO1:
573 case OMAP_DSS_VIDEO2: 623 case OMAP_DSS_VIDEO2:
574 return 0x002C; 624 return 0x002C;
@@ -576,6 +626,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
576 return 0x0000; 626 return 0x0000;
577 default: 627 default:
578 BUG(); 628 BUG();
629 return 0;
579 } 630 }
580} 631}
581 632
@@ -584,6 +635,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
584 switch (plane) { 635 switch (plane) {
585 case OMAP_DSS_GFX: 636 case OMAP_DSS_GFX:
586 BUG(); 637 BUG();
638 return 0;
587 case OMAP_DSS_VIDEO1: 639 case OMAP_DSS_VIDEO1:
588 return 0x0584; 640 return 0x0584;
589 case OMAP_DSS_VIDEO2: 641 case OMAP_DSS_VIDEO2:
@@ -592,6 +644,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
592 return 0x0428; 644 return 0x0428;
593 default: 645 default:
594 BUG(); 646 BUG();
647 return 0;
595 } 648 }
596} 649}
597 650
@@ -600,6 +653,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
600 switch (plane) { 653 switch (plane) {
601 case OMAP_DSS_GFX: 654 case OMAP_DSS_GFX:
602 BUG(); 655 BUG();
656 return 0;
603 case OMAP_DSS_VIDEO1: 657 case OMAP_DSS_VIDEO1:
604 case OMAP_DSS_VIDEO2: 658 case OMAP_DSS_VIDEO2:
605 return 0x0030; 659 return 0x0030;
@@ -607,6 +661,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
607 return 0x0004; 661 return 0x0004;
608 default: 662 default:
609 BUG(); 663 BUG();
664 return 0;
610 } 665 }
611} 666}
612 667
@@ -615,6 +670,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
615 switch (plane) { 670 switch (plane) {
616 case OMAP_DSS_GFX: 671 case OMAP_DSS_GFX:
617 BUG(); 672 BUG();
673 return 0;
618 case OMAP_DSS_VIDEO1: 674 case OMAP_DSS_VIDEO1:
619 return 0x0588; 675 return 0x0588;
620 case OMAP_DSS_VIDEO2: 676 case OMAP_DSS_VIDEO2:
@@ -623,6 +679,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
623 return 0x042C; 679 return 0x042C;
624 default: 680 default:
625 BUG(); 681 BUG();
682 return 0;
626 } 683 }
627} 684}
628 685
@@ -632,6 +689,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
632 switch (plane) { 689 switch (plane) {
633 case OMAP_DSS_GFX: 690 case OMAP_DSS_GFX:
634 BUG(); 691 BUG();
692 return 0;
635 case OMAP_DSS_VIDEO1: 693 case OMAP_DSS_VIDEO1:
636 case OMAP_DSS_VIDEO2: 694 case OMAP_DSS_VIDEO2:
637 return 0x0034 + i * 0x8; 695 return 0x0034 + i * 0x8;
@@ -639,6 +697,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
639 return 0x0010 + i * 0x8; 697 return 0x0010 + i * 0x8;
640 default: 698 default:
641 BUG(); 699 BUG();
700 return 0;
642 } 701 }
643} 702}
644 703
@@ -648,6 +707,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
648 switch (plane) { 707 switch (plane) {
649 case OMAP_DSS_GFX: 708 case OMAP_DSS_GFX:
650 BUG(); 709 BUG();
710 return 0;
651 case OMAP_DSS_VIDEO1: 711 case OMAP_DSS_VIDEO1:
652 return 0x058C + i * 0x8; 712 return 0x058C + i * 0x8;
653 case OMAP_DSS_VIDEO2: 713 case OMAP_DSS_VIDEO2:
@@ -656,6 +716,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
656 return 0x0430 + i * 0x8; 716 return 0x0430 + i * 0x8;
657 default: 717 default:
658 BUG(); 718 BUG();
719 return 0;
659 } 720 }
660} 721}
661 722
@@ -665,6 +726,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
665 switch (plane) { 726 switch (plane) {
666 case OMAP_DSS_GFX: 727 case OMAP_DSS_GFX:
667 BUG(); 728 BUG();
729 return 0;
668 case OMAP_DSS_VIDEO1: 730 case OMAP_DSS_VIDEO1:
669 case OMAP_DSS_VIDEO2: 731 case OMAP_DSS_VIDEO2:
670 return 0x0038 + i * 0x8; 732 return 0x0038 + i * 0x8;
@@ -672,6 +734,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
672 return 0x0014 + i * 0x8; 734 return 0x0014 + i * 0x8;
673 default: 735 default:
674 BUG(); 736 BUG();
737 return 0;
675 } 738 }
676} 739}
677 740
@@ -681,6 +744,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
681 switch (plane) { 744 switch (plane) {
682 case OMAP_DSS_GFX: 745 case OMAP_DSS_GFX:
683 BUG(); 746 BUG();
747 return 0;
684 case OMAP_DSS_VIDEO1: 748 case OMAP_DSS_VIDEO1:
685 return 0x0590 + i * 8; 749 return 0x0590 + i * 8;
686 case OMAP_DSS_VIDEO2: 750 case OMAP_DSS_VIDEO2:
@@ -689,6 +753,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
689 return 0x0434 + i * 0x8; 753 return 0x0434 + i * 0x8;
690 default: 754 default:
691 BUG(); 755 BUG();
756 return 0;
692 } 757 }
693} 758}
694 759
@@ -698,12 +763,14 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
698 switch (plane) { 763 switch (plane) {
699 case OMAP_DSS_GFX: 764 case OMAP_DSS_GFX:
700 BUG(); 765 BUG();
766 return 0;
701 case OMAP_DSS_VIDEO1: 767 case OMAP_DSS_VIDEO1:
702 case OMAP_DSS_VIDEO2: 768 case OMAP_DSS_VIDEO2:
703 case OMAP_DSS_VIDEO3: 769 case OMAP_DSS_VIDEO3:
704 return 0x0074 + i * 0x4; 770 return 0x0074 + i * 0x4;
705 default: 771 default:
706 BUG(); 772 BUG();
773 return 0;
707 } 774 }
708} 775}
709 776
@@ -713,6 +780,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
713 switch (plane) { 780 switch (plane) {
714 case OMAP_DSS_GFX: 781 case OMAP_DSS_GFX:
715 BUG(); 782 BUG();
783 return 0;
716 case OMAP_DSS_VIDEO1: 784 case OMAP_DSS_VIDEO1:
717 return 0x0124 + i * 0x4; 785 return 0x0124 + i * 0x4;
718 case OMAP_DSS_VIDEO2: 786 case OMAP_DSS_VIDEO2:
@@ -721,6 +789,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
721 return 0x0050 + i * 0x4; 789 return 0x0050 + i * 0x4;
722 default: 790 default:
723 BUG(); 791 BUG();
792 return 0;
724 } 793 }
725} 794}
726 795
@@ -730,6 +799,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
730 switch (plane) { 799 switch (plane) {
731 case OMAP_DSS_GFX: 800 case OMAP_DSS_GFX:
732 BUG(); 801 BUG();
802 return 0;
733 case OMAP_DSS_VIDEO1: 803 case OMAP_DSS_VIDEO1:
734 return 0x05CC + i * 0x4; 804 return 0x05CC + i * 0x4;
735 case OMAP_DSS_VIDEO2: 805 case OMAP_DSS_VIDEO2:
@@ -738,6 +808,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
738 return 0x0470 + i * 0x4; 808 return 0x0470 + i * 0x4;
739 default: 809 default:
740 BUG(); 810 BUG();
811 return 0;
741 } 812 }
742} 813}
743 814
@@ -754,6 +825,7 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
754 return 0x00A0; 825 return 0x00A0;
755 default: 826 default:
756 BUG(); 827 BUG();
828 return 0;
757 } 829 }
758} 830}
759#endif 831#endif
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 4424c198dbcd..249010630370 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -304,10 +304,18 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
304 return 24; 304 return 24;
305 default: 305 default:
306 BUG(); 306 BUG();
307 return 0;
307 } 308 }
308} 309}
309EXPORT_SYMBOL(omapdss_default_get_recommended_bpp); 310EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
310 311
312void omapdss_default_get_timings(struct omap_dss_device *dssdev,
313 struct omap_video_timings *timings)
314{
315 *timings = dssdev->panel.timings;
316}
317EXPORT_SYMBOL(omapdss_default_get_timings);
318
311/* Checks if replication logic should be used. Only use for active matrix, 319/* Checks if replication logic should be used. Only use for active matrix,
312 * when overlay is in RGB12U or RGB16 mode, and LCD interface is 320 * when overlay is in RGB12U or RGB16 mode, and LCD interface is
313 * 18bpp or 24bpp */ 321 * 18bpp or 24bpp */
@@ -340,6 +348,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
340 break; 348 break;
341 default: 349 default:
342 BUG(); 350 BUG();
351 return false;
343 } 352 }
344 353
345 return bpp > 16; 354 return bpp > 16;
@@ -352,46 +361,6 @@ void dss_init_device(struct platform_device *pdev,
352 int i; 361 int i;
353 int r; 362 int r;
354 363
355 switch (dssdev->type) {
356#ifdef CONFIG_OMAP2_DSS_DPI
357 case OMAP_DISPLAY_TYPE_DPI:
358 r = dpi_init_display(dssdev);
359 break;
360#endif
361#ifdef CONFIG_OMAP2_DSS_RFBI
362 case OMAP_DISPLAY_TYPE_DBI:
363 r = rfbi_init_display(dssdev);
364 break;
365#endif
366#ifdef CONFIG_OMAP2_DSS_VENC
367 case OMAP_DISPLAY_TYPE_VENC:
368 r = venc_init_display(dssdev);
369 break;
370#endif
371#ifdef CONFIG_OMAP2_DSS_SDI
372 case OMAP_DISPLAY_TYPE_SDI:
373 r = sdi_init_display(dssdev);
374 break;
375#endif
376#ifdef CONFIG_OMAP2_DSS_DSI
377 case OMAP_DISPLAY_TYPE_DSI:
378 r = dsi_init_display(dssdev);
379 break;
380#endif
381 case OMAP_DISPLAY_TYPE_HDMI:
382 r = hdmi_init_display(dssdev);
383 break;
384 default:
385 DSSERR("Support for display '%s' not compiled in.\n",
386 dssdev->name);
387 return;
388 }
389
390 if (r) {
391 DSSERR("failed to init display %s\n", dssdev->name);
392 return;
393 }
394
395 /* create device sysfs files */ 364 /* create device sysfs files */
396 i = 0; 365 i = 0;
397 while ((attr = display_sysfs_attrs[i++]) != NULL) { 366 while ((attr = display_sysfs_attrs[i++]) != NULL) {
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index faaf305fda27..8c2056c9537b 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -156,7 +156,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
156 t->pixel_clock = pck; 156 t->pixel_clock = pck;
157 } 157 }
158 158
159 dispc_mgr_set_lcd_timings(dssdev->manager->id, t); 159 dss_mgr_set_timings(dssdev->manager, t);
160 160
161 return 0; 161 return 0;
162} 162}
@@ -202,10 +202,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
202 goto err_reg_enable; 202 goto err_reg_enable;
203 } 203 }
204 204
205 r = dss_runtime_get();
206 if (r)
207 goto err_get_dss;
208
209 r = dispc_runtime_get(); 205 r = dispc_runtime_get();
210 if (r) 206 if (r)
211 goto err_get_dispc; 207 goto err_get_dispc;
@@ -244,8 +240,6 @@ err_dsi_pll_init:
244err_get_dsi: 240err_get_dsi:
245 dispc_runtime_put(); 241 dispc_runtime_put();
246err_get_dispc: 242err_get_dispc:
247 dss_runtime_put();
248err_get_dss:
249 if (cpu_is_omap34xx()) 243 if (cpu_is_omap34xx())
250 regulator_disable(dpi.vdds_dsi_reg); 244 regulator_disable(dpi.vdds_dsi_reg);
251err_reg_enable: 245err_reg_enable:
@@ -266,7 +260,6 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
266 } 260 }
267 261
268 dispc_runtime_put(); 262 dispc_runtime_put();
269 dss_runtime_put();
270 263
271 if (cpu_is_omap34xx()) 264 if (cpu_is_omap34xx())
272 regulator_disable(dpi.vdds_dsi_reg); 265 regulator_disable(dpi.vdds_dsi_reg);
@@ -283,21 +276,15 @@ void dpi_set_timings(struct omap_dss_device *dssdev,
283 DSSDBG("dpi_set_timings\n"); 276 DSSDBG("dpi_set_timings\n");
284 dssdev->panel.timings = *timings; 277 dssdev->panel.timings = *timings;
285 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { 278 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
286 r = dss_runtime_get();
287 if (r)
288 return;
289
290 r = dispc_runtime_get(); 279 r = dispc_runtime_get();
291 if (r) { 280 if (r)
292 dss_runtime_put();
293 return; 281 return;
294 }
295 282
296 dpi_set_mode(dssdev); 283 dpi_set_mode(dssdev);
297 dispc_mgr_go(dssdev->manager->id);
298 284
299 dispc_runtime_put(); 285 dispc_runtime_put();
300 dss_runtime_put(); 286 } else {
287 dss_mgr_set_timings(dssdev->manager, timings);
301 } 288 }
302} 289}
303EXPORT_SYMBOL(dpi_set_timings); 290EXPORT_SYMBOL(dpi_set_timings);
@@ -312,7 +299,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
312 unsigned long pck; 299 unsigned long pck;
313 struct dispc_clock_info dispc_cinfo; 300 struct dispc_clock_info dispc_cinfo;
314 301
315 if (!dispc_lcd_timings_ok(timings)) 302 if (dss_mgr_check_timings(dssdev->manager, timings))
316 return -EINVAL; 303 return -EINVAL;
317 304
318 if (timings->pixel_clock == 0) 305 if (timings->pixel_clock == 0)
@@ -352,7 +339,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
352} 339}
353EXPORT_SYMBOL(dpi_check_timings); 340EXPORT_SYMBOL(dpi_check_timings);
354 341
355int dpi_init_display(struct omap_dss_device *dssdev) 342static int __init dpi_init_display(struct omap_dss_device *dssdev)
356{ 343{
357 DSSDBG("init_display\n"); 344 DSSDBG("init_display\n");
358 345
@@ -378,12 +365,58 @@ int dpi_init_display(struct omap_dss_device *dssdev)
378 return 0; 365 return 0;
379} 366}
380 367
381int dpi_init(void) 368static void __init dpi_probe_pdata(struct platform_device *pdev)
382{ 369{
370 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
371 int i, r;
372
373 for (i = 0; i < pdata->num_devices; ++i) {
374 struct omap_dss_device *dssdev = pdata->devices[i];
375
376 if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
377 continue;
378
379 r = dpi_init_display(dssdev);
380 if (r) {
381 DSSERR("device %s init failed: %d\n", dssdev->name, r);
382 continue;
383 }
384
385 r = omap_dss_register_device(dssdev, &pdev->dev, i);
386 if (r)
387 DSSERR("device %s register failed: %d\n",
388 dssdev->name, r);
389 }
390}
391
392static int __init omap_dpi_probe(struct platform_device *pdev)
393{
394 dpi_probe_pdata(pdev);
395
396 return 0;
397}
398
399static int __exit omap_dpi_remove(struct platform_device *pdev)
400{
401 omap_dss_unregister_child_devices(&pdev->dev);
402
383 return 0; 403 return 0;
384} 404}
385 405
386void dpi_exit(void) 406static struct platform_driver omap_dpi_driver = {
407 .remove = __exit_p(omap_dpi_remove),
408 .driver = {
409 .name = "omapdss_dpi",
410 .owner = THIS_MODULE,
411 },
412};
413
414int __init dpi_init_platform_driver(void)
387{ 415{
416 return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
388} 417}
389 418
419void __exit dpi_uninit_platform_driver(void)
420{
421 platform_driver_unregister(&omap_dpi_driver);
422}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 210a3c4f6150..ca8382d346e9 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -256,14 +256,13 @@ struct dsi_data {
256 struct platform_device *pdev; 256 struct platform_device *pdev;
257 void __iomem *base; 257 void __iomem *base;
258 258
259 int module_id;
260
259 int irq; 261 int irq;
260 262
261 struct clk *dss_clk; 263 struct clk *dss_clk;
262 struct clk *sys_clk; 264 struct clk *sys_clk;
263 265
264 int (*enable_pads)(int dsi_id, unsigned lane_mask);
265 void (*disable_pads)(int dsi_id, unsigned lane_mask);
266
267 struct dsi_clock_info current_cinfo; 266 struct dsi_clock_info current_cinfo;
268 267
269 bool vdds_dsi_enabled; 268 bool vdds_dsi_enabled;
@@ -361,11 +360,6 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
361 return dsi_pdev_map[module]; 360 return dsi_pdev_map[module];
362} 361}
363 362
364static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
365{
366 return dsidev->id;
367}
368
369static inline void dsi_write_reg(struct platform_device *dsidev, 363static inline void dsi_write_reg(struct platform_device *dsidev,
370 const struct dsi_reg idx, u32 val) 364 const struct dsi_reg idx, u32 val)
371{ 365{
@@ -452,6 +446,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
452 return 16; 446 return 16;
453 default: 447 default:
454 BUG(); 448 BUG();
449 return 0;
455 } 450 }
456} 451}
457 452
@@ -1184,10 +1179,9 @@ static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1184static unsigned long dsi_fclk_rate(struct platform_device *dsidev) 1179static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1185{ 1180{
1186 unsigned long r; 1181 unsigned long r;
1187 int dsi_module = dsi_get_dsidev_id(dsidev);
1188 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1182 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1189 1183
1190 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) { 1184 if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
1191 /* DSI FCLK source is DSS_CLK_FCK */ 1185 /* DSI FCLK source is DSS_CLK_FCK */
1192 r = clk_get_rate(dsi->dss_clk); 1186 r = clk_get_rate(dsi->dss_clk);
1193 } else { 1187 } else {
@@ -1279,10 +1273,9 @@ static int dsi_pll_power(struct platform_device *dsidev,
1279} 1273}
1280 1274
1281/* calculate clock rates using dividers in cinfo */ 1275/* calculate clock rates using dividers in cinfo */
1282static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, 1276static int dsi_calc_clock_rates(struct platform_device *dsidev,
1283 struct dsi_clock_info *cinfo) 1277 struct dsi_clock_info *cinfo)
1284{ 1278{
1285 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1286 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1279 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1287 1280
1288 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max) 1281 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
@@ -1297,21 +1290,8 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1297 if (cinfo->regm_dsi > dsi->regm_dsi_max) 1290 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1298 return -EINVAL; 1291 return -EINVAL;
1299 1292
1300 if (cinfo->use_sys_clk) { 1293 cinfo->clkin = clk_get_rate(dsi->sys_clk);
1301 cinfo->clkin = clk_get_rate(dsi->sys_clk); 1294 cinfo->fint = cinfo->clkin / cinfo->regn;
1302 /* XXX it is unclear if highfreq should be used
1303 * with DSS_SYS_CLK source also */
1304 cinfo->highfreq = 0;
1305 } else {
1306 cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
1307
1308 if (cinfo->clkin < 32000000)
1309 cinfo->highfreq = 0;
1310 else
1311 cinfo->highfreq = 1;
1312 }
1313
1314 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
1315 1295
1316 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min) 1296 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1317 return -EINVAL; 1297 return -EINVAL;
@@ -1378,27 +1358,21 @@ retry:
1378 1358
1379 memset(&cur, 0, sizeof(cur)); 1359 memset(&cur, 0, sizeof(cur));
1380 cur.clkin = dss_sys_clk; 1360 cur.clkin = dss_sys_clk;
1381 cur.use_sys_clk = 1;
1382 cur.highfreq = 0;
1383 1361
1384 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ 1362 /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
1385 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1386 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ 1363 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1387 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) { 1364 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1388 if (cur.highfreq == 0) 1365 cur.fint = cur.clkin / cur.regn;
1389 cur.fint = cur.clkin / cur.regn;
1390 else
1391 cur.fint = cur.clkin / (2 * cur.regn);
1392 1366
1393 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min) 1367 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1394 continue; 1368 continue;
1395 1369
1396 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ 1370 /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
1397 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) { 1371 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1398 unsigned long a, b; 1372 unsigned long a, b;
1399 1373
1400 a = 2 * cur.regm * (cur.clkin/1000); 1374 a = 2 * cur.regm * (cur.clkin/1000);
1401 b = cur.regn * (cur.highfreq + 1); 1375 b = cur.regn;
1402 cur.clkin4ddr = a / b * 1000; 1376 cur.clkin4ddr = a / b * 1000;
1403 1377
1404 if (cur.clkin4ddr > 1800 * 1000 * 1000) 1378 if (cur.clkin4ddr > 1800 * 1000 * 1000)
@@ -1486,9 +1460,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1486 1460
1487 DSSDBGF(); 1461 DSSDBGF();
1488 1462
1489 dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk; 1463 dsi->current_cinfo.clkin = cinfo->clkin;
1490 dsi->current_cinfo.highfreq = cinfo->highfreq;
1491
1492 dsi->current_cinfo.fint = cinfo->fint; 1464 dsi->current_cinfo.fint = cinfo->fint;
1493 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr; 1465 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1494 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk = 1466 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
@@ -1503,17 +1475,13 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1503 1475
1504 DSSDBG("DSI Fint %ld\n", cinfo->fint); 1476 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1505 1477
1506 DSSDBG("clkin (%s) rate %ld, highfreq %d\n", 1478 DSSDBG("clkin rate %ld\n", cinfo->clkin);
1507 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
1508 cinfo->clkin,
1509 cinfo->highfreq);
1510 1479
1511 /* DSIPHY == CLKIN4DDR */ 1480 /* DSIPHY == CLKIN4DDR */
1512 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n", 1481 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
1513 cinfo->regm, 1482 cinfo->regm,
1514 cinfo->regn, 1483 cinfo->regn,
1515 cinfo->clkin, 1484 cinfo->clkin,
1516 cinfo->highfreq + 1,
1517 cinfo->clkin4ddr); 1485 cinfo->clkin4ddr);
1518 1486
1519 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n", 1487 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
@@ -1568,10 +1536,6 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1568 1536
1569 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) 1537 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
1570 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ 1538 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1571 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
1572 11, 11); /* DSI_PLL_CLKSEL */
1573 l = FLD_MOD(l, cinfo->highfreq,
1574 12, 12); /* DSI_PLL_HIGHFREQ */
1575 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ 1539 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1576 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ 1540 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1577 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ 1541 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
@@ -1716,7 +1680,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1716 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1680 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1717 struct dsi_clock_info *cinfo = &dsi->current_cinfo; 1681 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1718 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; 1682 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1719 int dsi_module = dsi_get_dsidev_id(dsidev); 1683 int dsi_module = dsi->module_id;
1720 1684
1721 dispc_clk_src = dss_get_dispc_clk_source(); 1685 dispc_clk_src = dss_get_dispc_clk_source();
1722 dsi_clk_src = dss_get_dsi_clk_source(dsi_module); 1686 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1726,8 +1690,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1726 1690
1727 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); 1691 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1728 1692
1729 seq_printf(s, "dsi pll source = %s\n", 1693 seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin);
1730 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
1731 1694
1732 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn); 1695 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1733 1696
@@ -1789,7 +1752,6 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1789 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1752 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1790 unsigned long flags; 1753 unsigned long flags;
1791 struct dsi_irq_stats stats; 1754 struct dsi_irq_stats stats;
1792 int dsi_module = dsi_get_dsidev_id(dsidev);
1793 1755
1794 spin_lock_irqsave(&dsi->irq_stats_lock, flags); 1756 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1795 1757
@@ -1806,7 +1768,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1806#define PIS(x) \ 1768#define PIS(x) \
1807 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); 1769 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1808 1770
1809 seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1); 1771 seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
1810 PIS(VC0); 1772 PIS(VC0);
1811 PIS(VC1); 1773 PIS(VC1);
1812 PIS(VC2); 1774 PIS(VC2);
@@ -1886,22 +1848,6 @@ static void dsi2_dump_irqs(struct seq_file *s)
1886 1848
1887 dsi_dump_dsidev_irqs(dsidev, s); 1849 dsi_dump_dsidev_irqs(dsidev, s);
1888} 1850}
1889
1890void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1891 const struct file_operations *debug_fops)
1892{
1893 struct platform_device *dsidev;
1894
1895 dsidev = dsi_get_dsidev_from_id(0);
1896 if (dsidev)
1897 debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1898 &dsi1_dump_irqs, debug_fops);
1899
1900 dsidev = dsi_get_dsidev_from_id(1);
1901 if (dsidev)
1902 debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1903 &dsi2_dump_irqs, debug_fops);
1904}
1905#endif 1851#endif
1906 1852
1907static void dsi_dump_dsidev_regs(struct platform_device *dsidev, 1853static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
@@ -2002,21 +1948,6 @@ static void dsi2_dump_regs(struct seq_file *s)
2002 dsi_dump_dsidev_regs(dsidev, s); 1948 dsi_dump_dsidev_regs(dsidev, s);
2003} 1949}
2004 1950
2005void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
2006 const struct file_operations *debug_fops)
2007{
2008 struct platform_device *dsidev;
2009
2010 dsidev = dsi_get_dsidev_from_id(0);
2011 if (dsidev)
2012 debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
2013 &dsi1_dump_regs, debug_fops);
2014
2015 dsidev = dsi_get_dsidev_from_id(1);
2016 if (dsidev)
2017 debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
2018 &dsi2_dump_regs, debug_fops);
2019}
2020enum dsi_cio_power_state { 1951enum dsi_cio_power_state {
2021 DSI_COMPLEXIO_POWER_OFF = 0x0, 1952 DSI_COMPLEXIO_POWER_OFF = 0x0,
2022 DSI_COMPLEXIO_POWER_ON = 0x1, 1953 DSI_COMPLEXIO_POWER_ON = 0x1,
@@ -2073,6 +2004,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2073 return 1365 * 3; /* 1365x24 bits */ 2004 return 1365 * 3; /* 1365x24 bits */
2074 default: 2005 default:
2075 BUG(); 2006 BUG();
2007 return 0;
2076 } 2008 }
2077} 2009}
2078 2010
@@ -2337,7 +2269,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2337 2269
2338 DSSDBGF(); 2270 DSSDBGF();
2339 2271
2340 r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); 2272 r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
2341 if (r) 2273 if (r)
2342 return r; 2274 return r;
2343 2275
@@ -2447,7 +2379,7 @@ err_cio_pwr:
2447 dsi_cio_disable_lane_override(dsidev); 2379 dsi_cio_disable_lane_override(dsidev);
2448err_scp_clk_dom: 2380err_scp_clk_dom:
2449 dsi_disable_scp_clk(dsidev); 2381 dsi_disable_scp_clk(dsidev);
2450 dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); 2382 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
2451 return r; 2383 return r;
2452} 2384}
2453 2385
@@ -2461,7 +2393,7 @@ static void dsi_cio_uninit(struct omap_dss_device *dssdev)
2461 2393
2462 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); 2394 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2463 dsi_disable_scp_clk(dsidev); 2395 dsi_disable_scp_clk(dsidev);
2464 dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); 2396 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
2465} 2397}
2466 2398
2467static void dsi_config_tx_fifo(struct platform_device *dsidev, 2399static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2485,6 +2417,7 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
2485 if (add + size > 4) { 2417 if (add + size > 4) {
2486 DSSERR("Illegal FIFO configuration\n"); 2418 DSSERR("Illegal FIFO configuration\n");
2487 BUG(); 2419 BUG();
2420 return;
2488 } 2421 }
2489 2422
2490 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); 2423 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2517,6 +2450,7 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
2517 if (add + size > 4) { 2450 if (add + size > 4) {
2518 DSSERR("Illegal FIFO configuration\n"); 2451 DSSERR("Illegal FIFO configuration\n");
2519 BUG(); 2452 BUG();
2453 return;
2520 } 2454 }
2521 2455
2522 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); 2456 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2658,6 +2592,7 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2658 return dsi_sync_vc_l4(dsidev, channel); 2592 return dsi_sync_vc_l4(dsidev, channel);
2659 default: 2593 default:
2660 BUG(); 2594 BUG();
2595 return -EINVAL;
2661 } 2596 }
2662} 2597}
2663 2598
@@ -3226,6 +3161,7 @@ static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
3226 data = reqdata[0] | (reqdata[1] << 8); 3161 data = reqdata[0] | (reqdata[1] << 8);
3227 } else { 3162 } else {
3228 BUG(); 3163 BUG();
3164 return -EINVAL;
3229 } 3165 }
3230 3166
3231 r = dsi_vc_send_short(dsidev, channel, data_type, data, 0); 3167 r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
@@ -3340,7 +3276,6 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
3340 goto err; 3276 goto err;
3341 } 3277 }
3342 3278
3343 BUG();
3344err: 3279err:
3345 DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel, 3280 DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
3346 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS"); 3281 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
@@ -3735,6 +3670,186 @@ static void dsi_config_blanking_modes(struct omap_dss_device *dssdev)
3735 dsi_write_reg(dsidev, DSI_CTRL, r); 3670 dsi_write_reg(dsidev, DSI_CTRL, r);
3736} 3671}
3737 3672
3673/*
3674 * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
3675 * results in maximum transition time for data and clock lanes to enter and
3676 * exit HS mode. Hence, this is the scenario where the least amount of command
3677 * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
3678 * clock cycles that can be used to interleave command mode data in HS so that
3679 * all scenarios are satisfied.
3680 */
3681static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
3682 int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
3683{
3684 int transition;
3685
3686 /*
3687 * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
3688 * time of data lanes only, if it isn't set, we need to consider HS
3689 * transition time of both data and clock lanes. HS transition time
3690 * of Scenario 3 is considered.
3691 */
3692 if (ddr_alwon) {
3693 transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
3694 } else {
3695 int trans1, trans2;
3696 trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
3697 trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
3698 enter_hs + 1;
3699 transition = max(trans1, trans2);
3700 }
3701
3702 return blank > transition ? blank - transition : 0;
3703}
3704
3705/*
3706 * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
3707 * results in maximum transition time for data lanes to enter and exit LP mode.
3708 * Hence, this is the scenario where the least amount of command mode data can
3709 * be interleaved. We program the minimum amount of bytes that can be
3710 * interleaved in LP so that all scenarios are satisfied.
3711 */
3712static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
3713 int lp_clk_div, int tdsi_fclk)
3714{
3715 int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */
3716 int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */
3717 int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */
3718 int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
3719 int lp_inter; /* cmd mode data that can be interleaved, in bytes */
3720
3721 /* maximum LP transition time according to Scenario 1 */
3722 trans_lp = exit_hs + max(enter_hs, 2) + 1;
3723
3724 /* CLKIN4DDR = 16 * TXBYTECLKHS */
3725 tlp_avail = thsbyte_clk * (blank - trans_lp);
3726
3727 ttxclkesc = tdsi_fclk * lp_clk_div;
3728
3729 lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
3730 26) / 16;
3731
3732 return max(lp_inter, 0);
3733}
3734
3735static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
3736{
3737 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3738 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3739 int blanking_mode;
3740 int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
3741 int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
3742 int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
3743 int tclk_trail, ths_exit, exiths_clk;
3744 bool ddr_alwon;
3745 struct omap_video_timings *timings = &dssdev->panel.timings;
3746 int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
3747 int ndl = dsi->num_lanes_used - 1;
3748 int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
3749 int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
3750 int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
3751 int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
3752 int bl_interleave_hs = 0, bl_interleave_lp = 0;
3753 u32 r;
3754
3755 r = dsi_read_reg(dsidev, DSI_CTRL);
3756 blanking_mode = FLD_GET(r, 20, 20);
3757 hfp_blanking_mode = FLD_GET(r, 21, 21);
3758 hbp_blanking_mode = FLD_GET(r, 22, 22);
3759 hsa_blanking_mode = FLD_GET(r, 23, 23);
3760
3761 r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
3762 hbp = FLD_GET(r, 11, 0);
3763 hfp = FLD_GET(r, 23, 12);
3764 hsa = FLD_GET(r, 31, 24);
3765
3766 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3767 ddr_clk_post = FLD_GET(r, 7, 0);
3768 ddr_clk_pre = FLD_GET(r, 15, 8);
3769
3770 r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
3771 exit_hs_mode_lat = FLD_GET(r, 15, 0);
3772 enter_hs_mode_lat = FLD_GET(r, 31, 16);
3773
3774 r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
3775 lp_clk_div = FLD_GET(r, 12, 0);
3776 ddr_alwon = FLD_GET(r, 13, 13);
3777
3778 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3779 ths_exit = FLD_GET(r, 7, 0);
3780
3781 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3782 tclk_trail = FLD_GET(r, 15, 8);
3783
3784 exiths_clk = ths_exit + tclk_trail;
3785
3786 width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
3787 bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
3788
3789 if (!hsa_blanking_mode) {
3790 hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
3791 enter_hs_mode_lat, exit_hs_mode_lat,
3792 exiths_clk, ddr_clk_pre, ddr_clk_post);
3793 hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
3794 enter_hs_mode_lat, exit_hs_mode_lat,
3795 lp_clk_div, dsi_fclk_hsdiv);
3796 }
3797
3798 if (!hfp_blanking_mode) {
3799 hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
3800 enter_hs_mode_lat, exit_hs_mode_lat,
3801 exiths_clk, ddr_clk_pre, ddr_clk_post);
3802 hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
3803 enter_hs_mode_lat, exit_hs_mode_lat,
3804 lp_clk_div, dsi_fclk_hsdiv);
3805 }
3806
3807 if (!hbp_blanking_mode) {
3808 hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
3809 enter_hs_mode_lat, exit_hs_mode_lat,
3810 exiths_clk, ddr_clk_pre, ddr_clk_post);
3811
3812 hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
3813 enter_hs_mode_lat, exit_hs_mode_lat,
3814 lp_clk_div, dsi_fclk_hsdiv);
3815 }
3816
3817 if (!blanking_mode) {
3818 bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
3819 enter_hs_mode_lat, exit_hs_mode_lat,
3820 exiths_clk, ddr_clk_pre, ddr_clk_post);
3821
3822 bl_interleave_lp = dsi_compute_interleave_lp(bllp,
3823 enter_hs_mode_lat, exit_hs_mode_lat,
3824 lp_clk_div, dsi_fclk_hsdiv);
3825 }
3826
3827 DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
3828 hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
3829 bl_interleave_hs);
3830
3831 DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
3832 hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
3833 bl_interleave_lp);
3834
3835 r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
3836 r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
3837 r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
3838 r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
3839 dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
3840
3841 r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
3842 r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
3843 r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
3844 r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
3845 dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
3846
3847 r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
3848 r = FLD_MOD(r, bl_interleave_hs, 31, 15);
3849 r = FLD_MOD(r, bl_interleave_lp, 16, 0);
3850 dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
3851}
3852
3738static int dsi_proto_config(struct omap_dss_device *dssdev) 3853static int dsi_proto_config(struct omap_dss_device *dssdev)
3739{ 3854{
3740 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3855 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3769,6 +3884,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
3769 break; 3884 break;
3770 default: 3885 default:
3771 BUG(); 3886 BUG();
3887 return -EINVAL;
3772 } 3888 }
3773 3889
3774 r = dsi_read_reg(dsidev, DSI_CTRL); 3890 r = dsi_read_reg(dsidev, DSI_CTRL);
@@ -3793,6 +3909,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
3793 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 3909 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
3794 dsi_config_vp_sync_events(dssdev); 3910 dsi_config_vp_sync_events(dssdev);
3795 dsi_config_blanking_modes(dssdev); 3911 dsi_config_blanking_modes(dssdev);
3912 dsi_config_cmd_mode_interleaving(dssdev);
3796 } 3913 }
3797 3914
3798 dsi_vc_initial_config(dsidev, 0); 3915 dsi_vc_initial_config(dsidev, 0);
@@ -4008,6 +4125,7 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
4008 break; 4125 break;
4009 default: 4126 default:
4010 BUG(); 4127 BUG();
4128 return -EINVAL;
4011 }; 4129 };
4012 4130
4013 dsi_if_enable(dsidev, false); 4131 dsi_if_enable(dsidev, false);
@@ -4192,10 +4310,6 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
4192 __cancel_delayed_work(&dsi->framedone_timeout_work); 4310 __cancel_delayed_work(&dsi->framedone_timeout_work);
4193 4311
4194 dsi_handle_framedone(dsidev, 0); 4312 dsi_handle_framedone(dsidev, 0);
4195
4196#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
4197 dispc_fake_vsync_irq();
4198#endif
4199} 4313}
4200 4314
4201int omap_dsi_update(struct omap_dss_device *dssdev, int channel, 4315int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
@@ -4259,13 +4373,12 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4259 dispc_mgr_enable_stallmode(dssdev->manager->id, true); 4373 dispc_mgr_enable_stallmode(dssdev->manager->id, true);
4260 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1); 4374 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
4261 4375
4262 dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings); 4376 dss_mgr_set_timings(dssdev->manager, &timings);
4263 } else { 4377 } else {
4264 dispc_mgr_enable_stallmode(dssdev->manager->id, false); 4378 dispc_mgr_enable_stallmode(dssdev->manager->id, false);
4265 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0); 4379 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
4266 4380
4267 dispc_mgr_set_lcd_timings(dssdev->manager->id, 4381 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
4268 &dssdev->panel.timings);
4269 } 4382 }
4270 4383
4271 dispc_mgr_set_lcd_display_type(dssdev->manager->id, 4384 dispc_mgr_set_lcd_display_type(dssdev->manager->id,
@@ -4294,13 +4407,11 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
4294 struct dsi_clock_info cinfo; 4407 struct dsi_clock_info cinfo;
4295 int r; 4408 int r;
4296 4409
4297 /* we always use DSS_CLK_SYSCK as input clock */
4298 cinfo.use_sys_clk = true;
4299 cinfo.regn = dssdev->clocks.dsi.regn; 4410 cinfo.regn = dssdev->clocks.dsi.regn;
4300 cinfo.regm = dssdev->clocks.dsi.regm; 4411 cinfo.regm = dssdev->clocks.dsi.regm;
4301 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc; 4412 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
4302 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi; 4413 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
4303 r = dsi_calc_clock_rates(dssdev, &cinfo); 4414 r = dsi_calc_clock_rates(dsidev, &cinfo);
4304 if (r) { 4415 if (r) {
4305 DSSERR("Failed to calc dsi clocks\n"); 4416 DSSERR("Failed to calc dsi clocks\n");
4306 return r; 4417 return r;
@@ -4345,7 +4456,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
4345static int dsi_display_init_dsi(struct omap_dss_device *dssdev) 4456static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4346{ 4457{
4347 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4458 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4348 int dsi_module = dsi_get_dsidev_id(dsidev); 4459 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4349 int r; 4460 int r;
4350 4461
4351 r = dsi_pll_init(dsidev, true, true); 4462 r = dsi_pll_init(dsidev, true, true);
@@ -4357,7 +4468,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4357 goto err1; 4468 goto err1;
4358 4469
4359 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); 4470 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
4360 dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src); 4471 dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
4361 dss_select_lcd_clk_source(dssdev->manager->id, 4472 dss_select_lcd_clk_source(dssdev->manager->id,
4362 dssdev->clocks.dispc.channel.lcd_clk_src); 4473 dssdev->clocks.dispc.channel.lcd_clk_src);
4363 4474
@@ -4396,7 +4507,7 @@ err3:
4396 dsi_cio_uninit(dssdev); 4507 dsi_cio_uninit(dssdev);
4397err2: 4508err2:
4398 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 4509 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4399 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK); 4510 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4400 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK); 4511 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
4401 4512
4402err1: 4513err1:
@@ -4410,7 +4521,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4410{ 4521{
4411 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4522 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4412 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4523 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4413 int dsi_module = dsi_get_dsidev_id(dsidev);
4414 4524
4415 if (enter_ulps && !dsi->ulps_enabled) 4525 if (enter_ulps && !dsi->ulps_enabled)
4416 dsi_enter_ulps(dsidev); 4526 dsi_enter_ulps(dsidev);
@@ -4423,7 +4533,7 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4423 dsi_vc_enable(dsidev, 3, 0); 4533 dsi_vc_enable(dsidev, 3, 0);
4424 4534
4425 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 4535 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4426 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK); 4536 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4427 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK); 4537 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
4428 dsi_cio_uninit(dssdev); 4538 dsi_cio_uninit(dssdev);
4429 dsi_pll_uninit(dsidev, disconnect_lanes); 4539 dsi_pll_uninit(dsidev, disconnect_lanes);
@@ -4527,7 +4637,7 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4527} 4637}
4528EXPORT_SYMBOL(omapdss_dsi_enable_te); 4638EXPORT_SYMBOL(omapdss_dsi_enable_te);
4529 4639
4530int dsi_init_display(struct omap_dss_device *dssdev) 4640static int __init dsi_init_display(struct omap_dss_device *dssdev)
4531{ 4641{
4532 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4642 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4533 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4643 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4680,13 +4790,39 @@ static void dsi_put_clocks(struct platform_device *dsidev)
4680 clk_put(dsi->sys_clk); 4790 clk_put(dsi->sys_clk);
4681} 4791}
4682 4792
4793static void __init dsi_probe_pdata(struct platform_device *dsidev)
4794{
4795 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4796 struct omap_dss_board_info *pdata = dsidev->dev.platform_data;
4797 int i, r;
4798
4799 for (i = 0; i < pdata->num_devices; ++i) {
4800 struct omap_dss_device *dssdev = pdata->devices[i];
4801
4802 if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
4803 continue;
4804
4805 if (dssdev->phy.dsi.module != dsi->module_id)
4806 continue;
4807
4808 r = dsi_init_display(dssdev);
4809 if (r) {
4810 DSSERR("device %s init failed: %d\n", dssdev->name, r);
4811 continue;
4812 }
4813
4814 r = omap_dss_register_device(dssdev, &dsidev->dev, i);
4815 if (r)
4816 DSSERR("device %s register failed: %d\n",
4817 dssdev->name, r);
4818 }
4819}
4820
4683/* DSI1 HW IP initialisation */ 4821/* DSI1 HW IP initialisation */
4684static int omap_dsihw_probe(struct platform_device *dsidev) 4822static int __init omap_dsihw_probe(struct platform_device *dsidev)
4685{ 4823{
4686 struct omap_display_platform_data *dss_plat_data;
4687 struct omap_dss_board_info *board_info;
4688 u32 rev; 4824 u32 rev;
4689 int r, i, dsi_module = dsi_get_dsidev_id(dsidev); 4825 int r, i;
4690 struct resource *dsi_mem; 4826 struct resource *dsi_mem;
4691 struct dsi_data *dsi; 4827 struct dsi_data *dsi;
4692 4828
@@ -4694,15 +4830,11 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
4694 if (!dsi) 4830 if (!dsi)
4695 return -ENOMEM; 4831 return -ENOMEM;
4696 4832
4833 dsi->module_id = dsidev->id;
4697 dsi->pdev = dsidev; 4834 dsi->pdev = dsidev;
4698 dsi_pdev_map[dsi_module] = dsidev; 4835 dsi_pdev_map[dsi->module_id] = dsidev;
4699 dev_set_drvdata(&dsidev->dev, dsi); 4836 dev_set_drvdata(&dsidev->dev, dsi);
4700 4837
4701 dss_plat_data = dsidev->dev.platform_data;
4702 board_info = dss_plat_data->board_data;
4703 dsi->enable_pads = board_info->dsi_enable_pads;
4704 dsi->disable_pads = board_info->dsi_disable_pads;
4705
4706 spin_lock_init(&dsi->irq_lock); 4838 spin_lock_init(&dsi->irq_lock);
4707 spin_lock_init(&dsi->errors_lock); 4839 spin_lock_init(&dsi->errors_lock);
4708 dsi->errors = 0; 4840 dsi->errors = 0;
@@ -4780,8 +4912,21 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
4780 else 4912 else
4781 dsi->num_lanes_supported = 3; 4913 dsi->num_lanes_supported = 3;
4782 4914
4915 dsi_probe_pdata(dsidev);
4916
4783 dsi_runtime_put(dsidev); 4917 dsi_runtime_put(dsidev);
4784 4918
4919 if (dsi->module_id == 0)
4920 dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
4921 else if (dsi->module_id == 1)
4922 dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
4923
4924#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
4925 if (dsi->module_id == 0)
4926 dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
4927 else if (dsi->module_id == 1)
4928 dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
4929#endif
4785 return 0; 4930 return 0;
4786 4931
4787err_runtime_get: 4932err_runtime_get:
@@ -4790,12 +4935,14 @@ err_runtime_get:
4790 return r; 4935 return r;
4791} 4936}
4792 4937
4793static int omap_dsihw_remove(struct platform_device *dsidev) 4938static int __exit omap_dsihw_remove(struct platform_device *dsidev)
4794{ 4939{
4795 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4940 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4796 4941
4797 WARN_ON(dsi->scp_clk_refcount > 0); 4942 WARN_ON(dsi->scp_clk_refcount > 0);
4798 4943
4944 omap_dss_unregister_child_devices(&dsidev->dev);
4945
4799 pm_runtime_disable(&dsidev->dev); 4946 pm_runtime_disable(&dsidev->dev);
4800 4947
4801 dsi_put_clocks(dsidev); 4948 dsi_put_clocks(dsidev);
@@ -4816,7 +4963,6 @@ static int omap_dsihw_remove(struct platform_device *dsidev)
4816static int dsi_runtime_suspend(struct device *dev) 4963static int dsi_runtime_suspend(struct device *dev)
4817{ 4964{
4818 dispc_runtime_put(); 4965 dispc_runtime_put();
4819 dss_runtime_put();
4820 4966
4821 return 0; 4967 return 0;
4822} 4968}
@@ -4825,20 +4971,11 @@ static int dsi_runtime_resume(struct device *dev)
4825{ 4971{
4826 int r; 4972 int r;
4827 4973
4828 r = dss_runtime_get();
4829 if (r)
4830 goto err_get_dss;
4831
4832 r = dispc_runtime_get(); 4974 r = dispc_runtime_get();
4833 if (r) 4975 if (r)
4834 goto err_get_dispc; 4976 return r;
4835 4977
4836 return 0; 4978 return 0;
4837
4838err_get_dispc:
4839 dss_runtime_put();
4840err_get_dss:
4841 return r;
4842} 4979}
4843 4980
4844static const struct dev_pm_ops dsi_pm_ops = { 4981static const struct dev_pm_ops dsi_pm_ops = {
@@ -4847,8 +4984,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
4847}; 4984};
4848 4985
4849static struct platform_driver omap_dsihw_driver = { 4986static struct platform_driver omap_dsihw_driver = {
4850 .probe = omap_dsihw_probe, 4987 .remove = __exit_p(omap_dsihw_remove),
4851 .remove = omap_dsihw_remove,
4852 .driver = { 4988 .driver = {
4853 .name = "omapdss_dsi", 4989 .name = "omapdss_dsi",
4854 .owner = THIS_MODULE, 4990 .owner = THIS_MODULE,
@@ -4856,12 +4992,12 @@ static struct platform_driver omap_dsihw_driver = {
4856 }, 4992 },
4857}; 4993};
4858 4994
4859int dsi_init_platform_driver(void) 4995int __init dsi_init_platform_driver(void)
4860{ 4996{
4861 return platform_driver_register(&omap_dsihw_driver); 4997 return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
4862} 4998}
4863 4999
4864void dsi_uninit_platform_driver(void) 5000void __exit dsi_uninit_platform_driver(void)
4865{ 5001{
4866 return platform_driver_unregister(&omap_dsihw_driver); 5002 platform_driver_unregister(&omap_dsihw_driver);
4867} 5003}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index bd2d5e159463..770632359a17 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -62,6 +62,9 @@ struct dss_reg {
62#define REG_FLD_MOD(idx, val, start, end) \ 62#define REG_FLD_MOD(idx, val, start, end) \
63 dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end)) 63 dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
64 64
65static int dss_runtime_get(void);
66static void dss_runtime_put(void);
67
65static struct { 68static struct {
66 struct platform_device *pdev; 69 struct platform_device *pdev;
67 void __iomem *base; 70 void __iomem *base;
@@ -277,7 +280,7 @@ void dss_dump_clocks(struct seq_file *s)
277 dss_runtime_put(); 280 dss_runtime_put();
278} 281}
279 282
280void dss_dump_regs(struct seq_file *s) 283static void dss_dump_regs(struct seq_file *s)
281{ 284{
282#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) 285#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
283 286
@@ -322,6 +325,7 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
322 break; 325 break;
323 default: 326 default:
324 BUG(); 327 BUG();
328 return;
325 } 329 }
326 330
327 dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end); 331 dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
@@ -335,7 +339,7 @@ void dss_select_dsi_clk_source(int dsi_module,
335 enum omap_dss_clk_source clk_src) 339 enum omap_dss_clk_source clk_src)
336{ 340{
337 struct platform_device *dsidev; 341 struct platform_device *dsidev;
338 int b; 342 int b, pos;
339 343
340 switch (clk_src) { 344 switch (clk_src) {
341 case OMAP_DSS_CLK_SRC_FCK: 345 case OMAP_DSS_CLK_SRC_FCK:
@@ -355,9 +359,11 @@ void dss_select_dsi_clk_source(int dsi_module,
355 break; 359 break;
356 default: 360 default:
357 BUG(); 361 BUG();
362 return;
358 } 363 }
359 364
360 REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */ 365 pos = dsi_module == 0 ? 1 : 10;
366 REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
361 367
362 dss.dsi_clk_source[dsi_module] = clk_src; 368 dss.dsi_clk_source[dsi_module] = clk_src;
363} 369}
@@ -389,6 +395,7 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
389 break; 395 break;
390 default: 396 default:
391 BUG(); 397 BUG();
398 return;
392 } 399 }
393 400
394 pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12; 401 pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
@@ -706,7 +713,7 @@ static void dss_put_clocks(void)
706 clk_put(dss.dss_clk); 713 clk_put(dss.dss_clk);
707} 714}
708 715
709int dss_runtime_get(void) 716static int dss_runtime_get(void)
710{ 717{
711 int r; 718 int r;
712 719
@@ -717,14 +724,14 @@ int dss_runtime_get(void)
717 return r < 0 ? r : 0; 724 return r < 0 ? r : 0;
718} 725}
719 726
720void dss_runtime_put(void) 727static void dss_runtime_put(void)
721{ 728{
722 int r; 729 int r;
723 730
724 DSSDBG("dss_runtime_put\n"); 731 DSSDBG("dss_runtime_put\n");
725 732
726 r = pm_runtime_put_sync(&dss.pdev->dev); 733 r = pm_runtime_put_sync(&dss.pdev->dev);
727 WARN_ON(r < 0); 734 WARN_ON(r < 0 && r != -EBUSY);
728} 735}
729 736
730/* DEBUGFS */ 737/* DEBUGFS */
@@ -740,7 +747,7 @@ void dss_debug_dump_clocks(struct seq_file *s)
740#endif 747#endif
741 748
742/* DSS HW IP initialisation */ 749/* DSS HW IP initialisation */
743static int omap_dsshw_probe(struct platform_device *pdev) 750static int __init omap_dsshw_probe(struct platform_device *pdev)
744{ 751{
745 struct resource *dss_mem; 752 struct resource *dss_mem;
746 u32 rev; 753 u32 rev;
@@ -785,40 +792,24 @@ static int omap_dsshw_probe(struct platform_device *pdev)
785 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; 792 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
786 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; 793 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
787 794
788 r = dpi_init();
789 if (r) {
790 DSSERR("Failed to initialize DPI\n");
791 goto err_dpi;
792 }
793
794 r = sdi_init();
795 if (r) {
796 DSSERR("Failed to initialize SDI\n");
797 goto err_sdi;
798 }
799
800 rev = dss_read_reg(DSS_REVISION); 795 rev = dss_read_reg(DSS_REVISION);
801 printk(KERN_INFO "OMAP DSS rev %d.%d\n", 796 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
802 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); 797 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
803 798
804 dss_runtime_put(); 799 dss_runtime_put();
805 800
801 dss_debugfs_create_file("dss", dss_dump_regs);
802
806 return 0; 803 return 0;
807err_sdi: 804
808 dpi_exit();
809err_dpi:
810 dss_runtime_put();
811err_runtime_get: 805err_runtime_get:
812 pm_runtime_disable(&pdev->dev); 806 pm_runtime_disable(&pdev->dev);
813 dss_put_clocks(); 807 dss_put_clocks();
814 return r; 808 return r;
815} 809}
816 810
817static int omap_dsshw_remove(struct platform_device *pdev) 811static int __exit omap_dsshw_remove(struct platform_device *pdev)
818{ 812{
819 dpi_exit();
820 sdi_exit();
821
822 pm_runtime_disable(&pdev->dev); 813 pm_runtime_disable(&pdev->dev);
823 814
824 dss_put_clocks(); 815 dss_put_clocks();
@@ -829,11 +820,24 @@ static int omap_dsshw_remove(struct platform_device *pdev)
829static int dss_runtime_suspend(struct device *dev) 820static int dss_runtime_suspend(struct device *dev)
830{ 821{
831 dss_save_context(); 822 dss_save_context();
823 dss_set_min_bus_tput(dev, 0);
832 return 0; 824 return 0;
833} 825}
834 826
835static int dss_runtime_resume(struct device *dev) 827static int dss_runtime_resume(struct device *dev)
836{ 828{
829 int r;
830 /*
831 * Set an arbitrarily high tput request to ensure OPP100.
832 * What we should really do is to make a request to stay in OPP100,
833 * without any tput requirements, but that is not currently possible
834 * via the PM layer.
835 */
836
837 r = dss_set_min_bus_tput(dev, 1000000000);
838 if (r)
839 return r;
840
837 dss_restore_context(); 841 dss_restore_context();
838 return 0; 842 return 0;
839} 843}
@@ -844,8 +848,7 @@ static const struct dev_pm_ops dss_pm_ops = {
844}; 848};
845 849
846static struct platform_driver omap_dsshw_driver = { 850static struct platform_driver omap_dsshw_driver = {
847 .probe = omap_dsshw_probe, 851 .remove = __exit_p(omap_dsshw_remove),
848 .remove = omap_dsshw_remove,
849 .driver = { 852 .driver = {
850 .name = "omapdss_dss", 853 .name = "omapdss_dss",
851 .owner = THIS_MODULE, 854 .owner = THIS_MODULE,
@@ -853,12 +856,12 @@ static struct platform_driver omap_dsshw_driver = {
853 }, 856 },
854}; 857};
855 858
856int dss_init_platform_driver(void) 859int __init dss_init_platform_driver(void)
857{ 860{
858 return platform_driver_register(&omap_dsshw_driver); 861 return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe);
859} 862}
860 863
861void dss_uninit_platform_driver(void) 864void dss_uninit_platform_driver(void)
862{ 865{
863 return platform_driver_unregister(&omap_dsshw_driver); 866 platform_driver_unregister(&omap_dsshw_driver);
864} 867}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index d4b3dff2ead3..dd1092ceaeef 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -150,9 +150,6 @@ struct dsi_clock_info {
150 u16 regm_dsi; /* OMAP3: REGM4 150 u16 regm_dsi; /* OMAP3: REGM4
151 * OMAP4: REGM5 */ 151 * OMAP4: REGM5 */
152 u16 lp_clk_div; 152 u16 lp_clk_div;
153
154 u8 highfreq;
155 bool use_sys_clk;
156}; 153};
157 154
158struct seq_file; 155struct seq_file;
@@ -162,6 +159,16 @@ struct platform_device;
162struct bus_type *dss_get_bus(void); 159struct bus_type *dss_get_bus(void);
163struct regulator *dss_get_vdds_dsi(void); 160struct regulator *dss_get_vdds_dsi(void);
164struct regulator *dss_get_vdds_sdi(void); 161struct regulator *dss_get_vdds_sdi(void);
162int dss_get_ctx_loss_count(struct device *dev);
163int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
164void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
165int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
166int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
167
168int omap_dss_register_device(struct omap_dss_device *dssdev,
169 struct device *parent, int disp_num);
170void omap_dss_unregister_device(struct omap_dss_device *dssdev);
171void omap_dss_unregister_child_devices(struct device *parent);
165 172
166/* apply */ 173/* apply */
167void dss_apply_init(void); 174void dss_apply_init(void);
@@ -179,6 +186,9 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
179int dss_mgr_set_device(struct omap_overlay_manager *mgr, 186int dss_mgr_set_device(struct omap_overlay_manager *mgr,
180 struct omap_dss_device *dssdev); 187 struct omap_dss_device *dssdev);
181int dss_mgr_unset_device(struct omap_overlay_manager *mgr); 188int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
189void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
190 struct omap_video_timings *timings);
191const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
182 192
183bool dss_ovl_is_enabled(struct omap_overlay *ovl); 193bool dss_ovl_is_enabled(struct omap_overlay *ovl);
184int dss_ovl_enable(struct omap_overlay *ovl); 194int dss_ovl_enable(struct omap_overlay *ovl);
@@ -208,9 +218,11 @@ int dss_init_overlay_managers(struct platform_device *pdev);
208void dss_uninit_overlay_managers(struct platform_device *pdev); 218void dss_uninit_overlay_managers(struct platform_device *pdev);
209int dss_mgr_simple_check(struct omap_overlay_manager *mgr, 219int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
210 const struct omap_overlay_manager_info *info); 220 const struct omap_overlay_manager_info *info);
221int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
222 const struct omap_video_timings *timings);
211int dss_mgr_check(struct omap_overlay_manager *mgr, 223int dss_mgr_check(struct omap_overlay_manager *mgr,
212 struct omap_dss_device *dssdev,
213 struct omap_overlay_manager_info *info, 224 struct omap_overlay_manager_info *info,
225 const struct omap_video_timings *mgr_timings,
214 struct omap_overlay_info **overlay_infos); 226 struct omap_overlay_info **overlay_infos);
215 227
216/* overlay */ 228/* overlay */
@@ -220,22 +232,18 @@ void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
220void dss_recheck_connections(struct omap_dss_device *dssdev, bool force); 232void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
221int dss_ovl_simple_check(struct omap_overlay *ovl, 233int dss_ovl_simple_check(struct omap_overlay *ovl,
222 const struct omap_overlay_info *info); 234 const struct omap_overlay_info *info);
223int dss_ovl_check(struct omap_overlay *ovl, 235int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
224 struct omap_overlay_info *info, struct omap_dss_device *dssdev); 236 const struct omap_video_timings *mgr_timings);
225 237
226/* DSS */ 238/* DSS */
227int dss_init_platform_driver(void); 239int dss_init_platform_driver(void) __init;
228void dss_uninit_platform_driver(void); 240void dss_uninit_platform_driver(void);
229 241
230int dss_runtime_get(void);
231void dss_runtime_put(void);
232
233void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); 242void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
234enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); 243enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
235const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); 244const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
236void dss_dump_clocks(struct seq_file *s); 245void dss_dump_clocks(struct seq_file *s);
237 246
238void dss_dump_regs(struct seq_file *s);
239#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) 247#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
240void dss_debug_dump_clocks(struct seq_file *s); 248void dss_debug_dump_clocks(struct seq_file *s);
241#endif 249#endif
@@ -265,19 +273,8 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
265 struct dispc_clock_info *dispc_cinfo); 273 struct dispc_clock_info *dispc_cinfo);
266 274
267/* SDI */ 275/* SDI */
268#ifdef CONFIG_OMAP2_DSS_SDI 276int sdi_init_platform_driver(void) __init;
269int sdi_init(void); 277void sdi_uninit_platform_driver(void) __exit;
270void sdi_exit(void);
271int sdi_init_display(struct omap_dss_device *display);
272#else
273static inline int sdi_init(void)
274{
275 return 0;
276}
277static inline void sdi_exit(void)
278{
279}
280#endif
281 278
282/* DSI */ 279/* DSI */
283#ifdef CONFIG_OMAP2_DSS_DSI 280#ifdef CONFIG_OMAP2_DSS_DSI
@@ -285,19 +282,14 @@ static inline void sdi_exit(void)
285struct dentry; 282struct dentry;
286struct file_operations; 283struct file_operations;
287 284
288int dsi_init_platform_driver(void); 285int dsi_init_platform_driver(void) __init;
289void dsi_uninit_platform_driver(void); 286void dsi_uninit_platform_driver(void) __exit;
290 287
291int dsi_runtime_get(struct platform_device *dsidev); 288int dsi_runtime_get(struct platform_device *dsidev);
292void dsi_runtime_put(struct platform_device *dsidev); 289void dsi_runtime_put(struct platform_device *dsidev);
293 290
294void dsi_dump_clocks(struct seq_file *s); 291void dsi_dump_clocks(struct seq_file *s);
295void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
296 const struct file_operations *debug_fops);
297void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
298 const struct file_operations *debug_fops);
299 292
300int dsi_init_display(struct omap_dss_device *display);
301void dsi_irq_handler(void); 293void dsi_irq_handler(void);
302u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt); 294u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
303 295
@@ -314,13 +306,6 @@ void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
314void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); 306void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
315struct platform_device *dsi_get_dsidev_from_id(int module); 307struct platform_device *dsi_get_dsidev_from_id(int module);
316#else 308#else
317static inline int dsi_init_platform_driver(void)
318{
319 return 0;
320}
321static inline void dsi_uninit_platform_driver(void)
322{
323}
324static inline int dsi_runtime_get(struct platform_device *dsidev) 309static inline int dsi_runtime_get(struct platform_device *dsidev)
325{ 310{
326 return 0; 311 return 0;
@@ -377,28 +362,14 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module)
377#endif 362#endif
378 363
379/* DPI */ 364/* DPI */
380#ifdef CONFIG_OMAP2_DSS_DPI 365int dpi_init_platform_driver(void) __init;
381int dpi_init(void); 366void dpi_uninit_platform_driver(void) __exit;
382void dpi_exit(void);
383int dpi_init_display(struct omap_dss_device *dssdev);
384#else
385static inline int dpi_init(void)
386{
387 return 0;
388}
389static inline void dpi_exit(void)
390{
391}
392#endif
393 367
394/* DISPC */ 368/* DISPC */
395int dispc_init_platform_driver(void); 369int dispc_init_platform_driver(void) __init;
396void dispc_uninit_platform_driver(void); 370void dispc_uninit_platform_driver(void) __exit;
397void dispc_dump_clocks(struct seq_file *s); 371void dispc_dump_clocks(struct seq_file *s);
398void dispc_dump_irqs(struct seq_file *s);
399void dispc_dump_regs(struct seq_file *s);
400void dispc_irq_handler(void); 372void dispc_irq_handler(void);
401void dispc_fake_vsync_irq(void);
402 373
403int dispc_runtime_get(void); 374int dispc_runtime_get(void);
404void dispc_runtime_put(void); 375void dispc_runtime_put(void);
@@ -409,12 +380,12 @@ void dispc_disable_sidle(void);
409void dispc_lcd_enable_signal_polarity(bool act_high); 380void dispc_lcd_enable_signal_polarity(bool act_high);
410void dispc_lcd_enable_signal(bool enable); 381void dispc_lcd_enable_signal(bool enable);
411void dispc_pck_free_enable(bool enable); 382void dispc_pck_free_enable(bool enable);
412void dispc_set_digit_size(u16 width, u16 height);
413void dispc_enable_fifomerge(bool enable); 383void dispc_enable_fifomerge(bool enable);
414void dispc_enable_gamma_table(bool enable); 384void dispc_enable_gamma_table(bool enable);
415void dispc_set_loadmode(enum omap_dss_load_mode mode); 385void dispc_set_loadmode(enum omap_dss_load_mode mode);
416 386
417bool dispc_lcd_timings_ok(struct omap_video_timings *timings); 387bool dispc_mgr_timings_ok(enum omap_channel channel,
388 const struct omap_video_timings *timings);
418unsigned long dispc_fclk_rate(void); 389unsigned long dispc_fclk_rate(void);
419void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, 390void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
420 struct dispc_clock_info *cinfo); 391 struct dispc_clock_info *cinfo);
@@ -424,15 +395,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
424 395
425void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high); 396void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
426void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, 397void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
427 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge); 398 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
399 bool manual_update);
428int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, 400int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
429 bool ilace, bool replication); 401 bool ilace, bool replication,
402 const struct omap_video_timings *mgr_timings);
430int dispc_ovl_enable(enum omap_plane plane, bool enable); 403int dispc_ovl_enable(enum omap_plane plane, bool enable);
431void dispc_ovl_set_channel_out(enum omap_plane plane, 404void dispc_ovl_set_channel_out(enum omap_plane plane,
432 enum omap_channel channel); 405 enum omap_channel channel);
433 406
434void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable); 407void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
435void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
436u32 dispc_mgr_get_vsync_irq(enum omap_channel channel); 408u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
437u32 dispc_mgr_get_framedone_irq(enum omap_channel channel); 409u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
438bool dispc_mgr_go_busy(enum omap_channel channel); 410bool dispc_mgr_go_busy(enum omap_channel channel);
@@ -445,12 +417,13 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
445void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines); 417void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
446void dispc_mgr_set_lcd_display_type(enum omap_channel channel, 418void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
447 enum omap_lcd_display_type type); 419 enum omap_lcd_display_type type);
448void dispc_mgr_set_lcd_timings(enum omap_channel channel, 420void dispc_mgr_set_timings(enum omap_channel channel,
449 struct omap_video_timings *timings); 421 struct omap_video_timings *timings);
450void dispc_mgr_set_pol_freq(enum omap_channel channel, 422void dispc_mgr_set_pol_freq(enum omap_channel channel,
451 enum omap_panel_config config, u8 acbi, u8 acb); 423 enum omap_panel_config config, u8 acbi, u8 acb);
452unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); 424unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
453unsigned long dispc_mgr_pclk_rate(enum omap_channel channel); 425unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
426unsigned long dispc_core_clk_rate(void);
454int dispc_mgr_set_clock_div(enum omap_channel channel, 427int dispc_mgr_set_clock_div(enum omap_channel channel,
455 struct dispc_clock_info *cinfo); 428 struct dispc_clock_info *cinfo);
456int dispc_mgr_get_clock_div(enum omap_channel channel, 429int dispc_mgr_get_clock_div(enum omap_channel channel,
@@ -460,19 +433,10 @@ void dispc_mgr_setup(enum omap_channel channel,
460 433
461/* VENC */ 434/* VENC */
462#ifdef CONFIG_OMAP2_DSS_VENC 435#ifdef CONFIG_OMAP2_DSS_VENC
463int venc_init_platform_driver(void); 436int venc_init_platform_driver(void) __init;
464void venc_uninit_platform_driver(void); 437void venc_uninit_platform_driver(void) __exit;
465void venc_dump_regs(struct seq_file *s);
466int venc_init_display(struct omap_dss_device *display);
467unsigned long venc_get_pixel_clock(void); 438unsigned long venc_get_pixel_clock(void);
468#else 439#else
469static inline int venc_init_platform_driver(void)
470{
471 return 0;
472}
473static inline void venc_uninit_platform_driver(void)
474{
475}
476static inline unsigned long venc_get_pixel_clock(void) 440static inline unsigned long venc_get_pixel_clock(void)
477{ 441{
478 WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__); 442 WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
@@ -482,23 +446,10 @@ static inline unsigned long venc_get_pixel_clock(void)
482 446
483/* HDMI */ 447/* HDMI */
484#ifdef CONFIG_OMAP4_DSS_HDMI 448#ifdef CONFIG_OMAP4_DSS_HDMI
485int hdmi_init_platform_driver(void); 449int hdmi_init_platform_driver(void) __init;
486void hdmi_uninit_platform_driver(void); 450void hdmi_uninit_platform_driver(void) __exit;
487int hdmi_init_display(struct omap_dss_device *dssdev);
488unsigned long hdmi_get_pixel_clock(void); 451unsigned long hdmi_get_pixel_clock(void);
489void hdmi_dump_regs(struct seq_file *s);
490#else 452#else
491static inline int hdmi_init_display(struct omap_dss_device *dssdev)
492{
493 return 0;
494}
495static inline int hdmi_init_platform_driver(void)
496{
497 return 0;
498}
499static inline void hdmi_uninit_platform_driver(void)
500{
501}
502static inline unsigned long hdmi_get_pixel_clock(void) 453static inline unsigned long hdmi_get_pixel_clock(void)
503{ 454{
504 WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__); 455 WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
@@ -514,22 +465,18 @@ int omapdss_hdmi_read_edid(u8 *buf, int len);
514bool omapdss_hdmi_detect(void); 465bool omapdss_hdmi_detect(void);
515int hdmi_panel_init(void); 466int hdmi_panel_init(void);
516void hdmi_panel_exit(void); 467void hdmi_panel_exit(void);
468#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO
469int hdmi_audio_enable(void);
470void hdmi_audio_disable(void);
471int hdmi_audio_start(void);
472void hdmi_audio_stop(void);
473bool hdmi_mode_has_audio(void);
474int hdmi_audio_config(struct omap_dss_audio *audio);
475#endif
517 476
518/* RFBI */ 477/* RFBI */
519#ifdef CONFIG_OMAP2_DSS_RFBI 478int rfbi_init_platform_driver(void) __init;
520int rfbi_init_platform_driver(void); 479void rfbi_uninit_platform_driver(void) __exit;
521void rfbi_uninit_platform_driver(void);
522void rfbi_dump_regs(struct seq_file *s);
523int rfbi_init_display(struct omap_dss_device *display);
524#else
525static inline int rfbi_init_platform_driver(void)
526{
527 return 0;
528}
529static inline void rfbi_uninit_platform_driver(void)
530{
531}
532#endif
533 480
534 481
535#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 482#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index ce14aa6dd672..938709724f0c 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -52,6 +52,8 @@ struct omap_dss_features {
52 const char * const *clksrc_names; 52 const char * const *clksrc_names;
53 const struct dss_param_range *dss_params; 53 const struct dss_param_range *dss_params;
54 54
55 const enum omap_dss_rotation_type supported_rotation_types;
56
55 const u32 buffer_size_unit; 57 const u32 buffer_size_unit;
56 const u32 burst_size_unit; 58 const u32 burst_size_unit;
57}; 59};
@@ -311,6 +313,8 @@ static const struct dss_param_range omap2_dss_param_range[] = {
311 * scaler cannot scale a image with width more than 768. 313 * scaler cannot scale a image with width more than 768.
312 */ 314 */
313 [FEAT_PARAM_LINEWIDTH] = { 1, 768 }, 315 [FEAT_PARAM_LINEWIDTH] = { 1, 768 },
316 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
317 [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
314}; 318};
315 319
316static const struct dss_param_range omap3_dss_param_range[] = { 320static const struct dss_param_range omap3_dss_param_range[] = {
@@ -324,6 +328,8 @@ static const struct dss_param_range omap3_dss_param_range[] = {
324 [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1}, 328 [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
325 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 329 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
326 [FEAT_PARAM_LINEWIDTH] = { 1, 1024 }, 330 [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
331 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
332 [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
327}; 333};
328 334
329static const struct dss_param_range omap4_dss_param_range[] = { 335static const struct dss_param_range omap4_dss_param_range[] = {
@@ -337,6 +343,8 @@ static const struct dss_param_range omap4_dss_param_range[] = {
337 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 }, 343 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
338 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 344 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
339 [FEAT_PARAM_LINEWIDTH] = { 1, 2048 }, 345 [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
346 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
347 [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
340}; 348};
341 349
342static const enum dss_feat_id omap2_dss_feat_list[] = { 350static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -399,6 +407,7 @@ static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
399 FEAT_FIR_COEF_V, 407 FEAT_FIR_COEF_V,
400 FEAT_ALPHA_FREE_ZORDER, 408 FEAT_ALPHA_FREE_ZORDER,
401 FEAT_FIFO_MERGE, 409 FEAT_FIFO_MERGE,
410 FEAT_BURST_2D,
402}; 411};
403 412
404static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = { 413static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
@@ -416,6 +425,7 @@ static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
416 FEAT_FIR_COEF_V, 425 FEAT_FIR_COEF_V,
417 FEAT_ALPHA_FREE_ZORDER, 426 FEAT_ALPHA_FREE_ZORDER,
418 FEAT_FIFO_MERGE, 427 FEAT_FIFO_MERGE,
428 FEAT_BURST_2D,
419}; 429};
420 430
421static const enum dss_feat_id omap4_dss_feat_list[] = { 431static const enum dss_feat_id omap4_dss_feat_list[] = {
@@ -434,6 +444,7 @@ static const enum dss_feat_id omap4_dss_feat_list[] = {
434 FEAT_FIR_COEF_V, 444 FEAT_FIR_COEF_V,
435 FEAT_ALPHA_FREE_ZORDER, 445 FEAT_ALPHA_FREE_ZORDER,
436 FEAT_FIFO_MERGE, 446 FEAT_FIFO_MERGE,
447 FEAT_BURST_2D,
437}; 448};
438 449
439/* OMAP2 DSS Features */ 450/* OMAP2 DSS Features */
@@ -451,6 +462,7 @@ static const struct omap_dss_features omap2_dss_features = {
451 .overlay_caps = omap2_dss_overlay_caps, 462 .overlay_caps = omap2_dss_overlay_caps,
452 .clksrc_names = omap2_dss_clk_source_names, 463 .clksrc_names = omap2_dss_clk_source_names,
453 .dss_params = omap2_dss_param_range, 464 .dss_params = omap2_dss_param_range,
465 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
454 .buffer_size_unit = 1, 466 .buffer_size_unit = 1,
455 .burst_size_unit = 8, 467 .burst_size_unit = 8,
456}; 468};
@@ -470,6 +482,7 @@ static const struct omap_dss_features omap3430_dss_features = {
470 .overlay_caps = omap3430_dss_overlay_caps, 482 .overlay_caps = omap3430_dss_overlay_caps,
471 .clksrc_names = omap3_dss_clk_source_names, 483 .clksrc_names = omap3_dss_clk_source_names,
472 .dss_params = omap3_dss_param_range, 484 .dss_params = omap3_dss_param_range,
485 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
473 .buffer_size_unit = 1, 486 .buffer_size_unit = 1,
474 .burst_size_unit = 8, 487 .burst_size_unit = 8,
475}; 488};
@@ -488,6 +501,7 @@ static const struct omap_dss_features omap3630_dss_features = {
488 .overlay_caps = omap3630_dss_overlay_caps, 501 .overlay_caps = omap3630_dss_overlay_caps,
489 .clksrc_names = omap3_dss_clk_source_names, 502 .clksrc_names = omap3_dss_clk_source_names,
490 .dss_params = omap3_dss_param_range, 503 .dss_params = omap3_dss_param_range,
504 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
491 .buffer_size_unit = 1, 505 .buffer_size_unit = 1,
492 .burst_size_unit = 8, 506 .burst_size_unit = 8,
493}; 507};
@@ -508,6 +522,7 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
508 .overlay_caps = omap4_dss_overlay_caps, 522 .overlay_caps = omap4_dss_overlay_caps,
509 .clksrc_names = omap4_dss_clk_source_names, 523 .clksrc_names = omap4_dss_clk_source_names,
510 .dss_params = omap4_dss_param_range, 524 .dss_params = omap4_dss_param_range,
525 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
511 .buffer_size_unit = 16, 526 .buffer_size_unit = 16,
512 .burst_size_unit = 16, 527 .burst_size_unit = 16,
513}; 528};
@@ -527,6 +542,7 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
527 .overlay_caps = omap4_dss_overlay_caps, 542 .overlay_caps = omap4_dss_overlay_caps,
528 .clksrc_names = omap4_dss_clk_source_names, 543 .clksrc_names = omap4_dss_clk_source_names,
529 .dss_params = omap4_dss_param_range, 544 .dss_params = omap4_dss_param_range,
545 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
530 .buffer_size_unit = 16, 546 .buffer_size_unit = 16,
531 .burst_size_unit = 16, 547 .burst_size_unit = 16,
532}; 548};
@@ -546,6 +562,7 @@ static const struct omap_dss_features omap4_dss_features = {
546 .overlay_caps = omap4_dss_overlay_caps, 562 .overlay_caps = omap4_dss_overlay_caps,
547 .clksrc_names = omap4_dss_clk_source_names, 563 .clksrc_names = omap4_dss_clk_source_names,
548 .dss_params = omap4_dss_param_range, 564 .dss_params = omap4_dss_param_range,
565 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
549 .buffer_size_unit = 16, 566 .buffer_size_unit = 16,
550 .burst_size_unit = 16, 567 .burst_size_unit = 16,
551}; 568};
@@ -562,13 +579,17 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
562 .pll_enable = ti_hdmi_4xxx_pll_enable, 579 .pll_enable = ti_hdmi_4xxx_pll_enable,
563 .pll_disable = ti_hdmi_4xxx_pll_disable, 580 .pll_disable = ti_hdmi_4xxx_pll_disable,
564 .video_enable = ti_hdmi_4xxx_wp_video_start, 581 .video_enable = ti_hdmi_4xxx_wp_video_start,
582 .video_disable = ti_hdmi_4xxx_wp_video_stop,
565 .dump_wrapper = ti_hdmi_4xxx_wp_dump, 583 .dump_wrapper = ti_hdmi_4xxx_wp_dump,
566 .dump_core = ti_hdmi_4xxx_core_dump, 584 .dump_core = ti_hdmi_4xxx_core_dump,
567 .dump_pll = ti_hdmi_4xxx_pll_dump, 585 .dump_pll = ti_hdmi_4xxx_pll_dump,
568 .dump_phy = ti_hdmi_4xxx_phy_dump, 586 .dump_phy = ti_hdmi_4xxx_phy_dump,
569#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 587#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
570 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
571 .audio_enable = ti_hdmi_4xxx_wp_audio_enable, 588 .audio_enable = ti_hdmi_4xxx_wp_audio_enable,
589 .audio_disable = ti_hdmi_4xxx_wp_audio_disable,
590 .audio_start = ti_hdmi_4xxx_audio_start,
591 .audio_stop = ti_hdmi_4xxx_audio_stop,
592 .audio_config = ti_hdmi_4xxx_audio_config,
572#endif 593#endif
573 594
574}; 595};
@@ -662,6 +683,11 @@ void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
662 *end = omap_current_dss_features->reg_fields[id].end; 683 *end = omap_current_dss_features->reg_fields[id].end;
663} 684}
664 685
686bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type)
687{
688 return omap_current_dss_features->supported_rotation_types & rot_type;
689}
690
665void dss_features_init(void) 691void dss_features_init(void)
666{ 692{
667 if (cpu_is_omap24xx()) 693 if (cpu_is_omap24xx())
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index c332e7ddfce1..bdf469f080e7 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -62,6 +62,7 @@ enum dss_feat_id {
62 FEAT_FIFO_MERGE, 62 FEAT_FIFO_MERGE,
63 /* An unknown HW bug causing the normal FIFO thresholds not to work */ 63 /* An unknown HW bug causing the normal FIFO thresholds not to work */
64 FEAT_OMAP3_DSI_FIFO_BUG, 64 FEAT_OMAP3_DSI_FIFO_BUG,
65 FEAT_BURST_2D,
65}; 66};
66 67
67/* DSS register field id */ 68/* DSS register field id */
@@ -91,6 +92,8 @@ enum dss_range_param {
91 FEAT_PARAM_DSIPLL_LPDIV, 92 FEAT_PARAM_DSIPLL_LPDIV,
92 FEAT_PARAM_DOWNSCALE, 93 FEAT_PARAM_DOWNSCALE,
93 FEAT_PARAM_LINEWIDTH, 94 FEAT_PARAM_LINEWIDTH,
95 FEAT_PARAM_MGR_WIDTH,
96 FEAT_PARAM_MGR_HEIGHT,
94}; 97};
95 98
96/* DSS Feature Functions */ 99/* DSS Feature Functions */
@@ -108,6 +111,8 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
108u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ 111u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
109u32 dss_feat_get_burst_size_unit(void); /* in bytes */ 112u32 dss_feat_get_burst_size_unit(void); /* in bytes */
110 113
114bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
115
111bool dss_has_feature(enum dss_feat_id id); 116bool dss_has_feature(enum dss_feat_id id);
112void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); 117void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
113void dss_features_init(void); 118void dss_features_init(void);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index c4b4f6950a92..8195c7166d20 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -33,12 +33,6 @@
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/clk.h> 34#include <linux/clk.h>
35#include <video/omapdss.h> 35#include <video/omapdss.h>
36#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
37 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
38#include <sound/soc.h>
39#include <sound/pcm_params.h>
40#include "ti_hdmi_4xxx_ip.h"
41#endif
42 36
43#include "ti_hdmi.h" 37#include "ti_hdmi.h"
44#include "dss.h" 38#include "dss.h"
@@ -63,7 +57,6 @@
63 57
64static struct { 58static struct {
65 struct mutex lock; 59 struct mutex lock;
66 struct omap_display_platform_data *pdata;
67 struct platform_device *pdev; 60 struct platform_device *pdev;
68 struct hdmi_ip_data ip_data; 61 struct hdmi_ip_data ip_data;
69 62
@@ -130,25 +123,12 @@ static int hdmi_runtime_get(void)
130 123
131 DSSDBG("hdmi_runtime_get\n"); 124 DSSDBG("hdmi_runtime_get\n");
132 125
133 /*
134 * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled.
135 * This should be removed later.
136 */
137 r = dss_runtime_get();
138 if (r < 0)
139 goto err_get_dss;
140
141 r = pm_runtime_get_sync(&hdmi.pdev->dev); 126 r = pm_runtime_get_sync(&hdmi.pdev->dev);
142 WARN_ON(r < 0); 127 WARN_ON(r < 0);
143 if (r < 0) 128 if (r < 0)
144 goto err_get_hdmi; 129 return r;
145 130
146 return 0; 131 return 0;
147
148err_get_hdmi:
149 dss_runtime_put();
150err_get_dss:
151 return r;
152} 132}
153 133
154static void hdmi_runtime_put(void) 134static void hdmi_runtime_put(void)
@@ -159,15 +139,9 @@ static void hdmi_runtime_put(void)
159 139
160 r = pm_runtime_put_sync(&hdmi.pdev->dev); 140 r = pm_runtime_put_sync(&hdmi.pdev->dev);
161 WARN_ON(r < 0); 141 WARN_ON(r < 0);
162
163 /*
164 * HACK: This is added to complement the dss_runtime_get() call in
165 * hdmi_runtime_get(). This should be removed later.
166 */
167 dss_runtime_put();
168} 142}
169 143
170int hdmi_init_display(struct omap_dss_device *dssdev) 144static int __init hdmi_init_display(struct omap_dss_device *dssdev)
171{ 145{
172 DSSDBG("init_display\n"); 146 DSSDBG("init_display\n");
173 147
@@ -344,7 +318,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
344 318
345 hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data); 319 hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
346 320
347 hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); 321 hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
348 322
349 /* config the PLL and PHY hdmi_set_pll_pwrfirst */ 323 /* config the PLL and PHY hdmi_set_pll_pwrfirst */
350 r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data); 324 r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
@@ -376,10 +350,11 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
376 dispc_enable_gamma_table(0); 350 dispc_enable_gamma_table(0);
377 351
378 /* tv size */ 352 /* tv size */
379 dispc_set_digit_size(dssdev->panel.timings.x_res, 353 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
380 dssdev->panel.timings.y_res);
381 354
382 hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1); 355 r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
356 if (r)
357 goto err_vid_enable;
383 358
384 r = dss_mgr_enable(dssdev->manager); 359 r = dss_mgr_enable(dssdev->manager);
385 if (r) 360 if (r)
@@ -388,7 +363,8 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
388 return 0; 363 return 0;
389 364
390err_mgr_enable: 365err_mgr_enable:
391 hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); 366 hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
367err_vid_enable:
392 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data); 368 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
393 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data); 369 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
394err: 370err:
@@ -400,7 +376,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
400{ 376{
401 dss_mgr_disable(dssdev->manager); 377 dss_mgr_disable(dssdev->manager);
402 378
403 hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); 379 hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
404 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data); 380 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
405 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data); 381 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
406 hdmi_runtime_put(); 382 hdmi_runtime_put();
@@ -436,10 +412,12 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
436 r = hdmi_power_on(dssdev); 412 r = hdmi_power_on(dssdev);
437 if (r) 413 if (r)
438 DSSERR("failed to power on device\n"); 414 DSSERR("failed to power on device\n");
415 } else {
416 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
439 } 417 }
440} 418}
441 419
442void hdmi_dump_regs(struct seq_file *s) 420static void hdmi_dump_regs(struct seq_file *s)
443{ 421{
444 mutex_lock(&hdmi.lock); 422 mutex_lock(&hdmi.lock);
445 423
@@ -555,248 +533,201 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
555 mutex_unlock(&hdmi.lock); 533 mutex_unlock(&hdmi.lock);
556} 534}
557 535
558#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 536static int hdmi_get_clocks(struct platform_device *pdev)
559 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
560
561static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
562 struct snd_soc_dai *dai)
563{ 537{
564 struct snd_soc_pcm_runtime *rtd = substream->private_data; 538 struct clk *clk;
565 struct snd_soc_codec *codec = rtd->codec;
566 struct platform_device *pdev = to_platform_device(codec->dev);
567 struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
568 int err = 0;
569 539
570 if (!(ip_data->ops) && !(ip_data->ops->audio_enable)) { 540 clk = clk_get(&pdev->dev, "sys_clk");
571 dev_err(&pdev->dev, "Cannot enable/disable audio\n"); 541 if (IS_ERR(clk)) {
572 return -ENODEV; 542 DSSERR("can't get sys_clk\n");
543 return PTR_ERR(clk);
573 } 544 }
574 545
575 switch (cmd) { 546 hdmi.sys_clk = clk;
576 case SNDRV_PCM_TRIGGER_START: 547
577 case SNDRV_PCM_TRIGGER_RESUME: 548 return 0;
578 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 549}
579 ip_data->ops->audio_enable(ip_data, true); 550
580 break; 551static void hdmi_put_clocks(void)
581 case SNDRV_PCM_TRIGGER_STOP: 552{
582 case SNDRV_PCM_TRIGGER_SUSPEND: 553 if (hdmi.sys_clk)
583 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 554 clk_put(hdmi.sys_clk);
584 ip_data->ops->audio_enable(ip_data, false); 555}
585 break; 556
586 default: 557#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
587 err = -EINVAL; 558int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
588 } 559{
589 return err; 560 u32 deep_color;
590} 561 bool deep_color_correct = false;
591 562 u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
592static int hdmi_audio_hw_params(struct snd_pcm_substream *substream, 563
593 struct snd_pcm_hw_params *params, 564 if (n == NULL || cts == NULL)
594 struct snd_soc_dai *dai)
595{
596 struct snd_soc_pcm_runtime *rtd = substream->private_data;
597 struct snd_soc_codec *codec = rtd->codec;
598 struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
599 struct hdmi_audio_format audio_format;
600 struct hdmi_audio_dma audio_dma;
601 struct hdmi_core_audio_config core_cfg;
602 struct hdmi_core_infoframe_audio aud_if_cfg;
603 int err, n, cts;
604 enum hdmi_core_audio_sample_freq sample_freq;
605
606 switch (params_format(params)) {
607 case SNDRV_PCM_FORMAT_S16_LE:
608 core_cfg.i2s_cfg.word_max_length =
609 HDMI_AUDIO_I2S_MAX_WORD_20BITS;
610 core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
611 core_cfg.i2s_cfg.in_length_bits =
612 HDMI_AUDIO_I2S_INPUT_LENGTH_16;
613 core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
614 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
615 audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
616 audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
617 audio_dma.transfer_size = 0x10;
618 break;
619 case SNDRV_PCM_FORMAT_S24_LE:
620 core_cfg.i2s_cfg.word_max_length =
621 HDMI_AUDIO_I2S_MAX_WORD_24BITS;
622 core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
623 core_cfg.i2s_cfg.in_length_bits =
624 HDMI_AUDIO_I2S_INPUT_LENGTH_24;
625 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
626 audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
627 audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
628 core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
629 audio_dma.transfer_size = 0x20;
630 break;
631 default:
632 return -EINVAL; 565 return -EINVAL;
633 }
634 566
635 switch (params_rate(params)) { 567 /* TODO: When implemented, query deep color mode here. */
568 deep_color = 100;
569
570 /*
571 * When using deep color, the default N value (as in the HDMI
572 * specification) yields to an non-integer CTS. Hence, we
573 * modify it while keeping the restrictions described in
574 * section 7.2.1 of the HDMI 1.4a specification.
575 */
576 switch (sample_freq) {
636 case 32000: 577 case 32000:
637 sample_freq = HDMI_AUDIO_FS_32000; 578 case 48000:
579 case 96000:
580 case 192000:
581 if (deep_color == 125)
582 if (pclk == 27027 || pclk == 74250)
583 deep_color_correct = true;
584 if (deep_color == 150)
585 if (pclk == 27027)
586 deep_color_correct = true;
638 break; 587 break;
639 case 44100: 588 case 44100:
640 sample_freq = HDMI_AUDIO_FS_44100; 589 case 88200:
641 break; 590 case 176400:
642 case 48000: 591 if (deep_color == 125)
643 sample_freq = HDMI_AUDIO_FS_48000; 592 if (pclk == 27027)
593 deep_color_correct = true;
644 break; 594 break;
645 default: 595 default:
646 return -EINVAL; 596 return -EINVAL;
647 } 597 }
648 598
649 err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts); 599 if (deep_color_correct) {
650 if (err < 0) 600 switch (sample_freq) {
651 return err; 601 case 32000:
652 602 *n = 8192;
653 /* Audio wrapper config */ 603 break;
654 audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL; 604 case 44100:
655 audio_format.active_chnnls_msk = 0x03; 605 *n = 12544;
656 audio_format.type = HDMI_AUDIO_TYPE_LPCM; 606 break;
657 audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST; 607 case 48000:
658 /* Disable start/stop signals of IEC 60958 blocks */ 608 *n = 8192;
659 audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF; 609 break;
610 case 88200:
611 *n = 25088;
612 break;
613 case 96000:
614 *n = 16384;
615 break;
616 case 176400:
617 *n = 50176;
618 break;
619 case 192000:
620 *n = 32768;
621 break;
622 default:
623 return -EINVAL;
624 }
625 } else {
626 switch (sample_freq) {
627 case 32000:
628 *n = 4096;
629 break;
630 case 44100:
631 *n = 6272;
632 break;
633 case 48000:
634 *n = 6144;
635 break;
636 case 88200:
637 *n = 12544;
638 break;
639 case 96000:
640 *n = 12288;
641 break;
642 case 176400:
643 *n = 25088;
644 break;
645 case 192000:
646 *n = 24576;
647 break;
648 default:
649 return -EINVAL;
650 }
651 }
652 /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
653 *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
660 654
661 audio_dma.block_size = 0xC0; 655 return 0;
662 audio_dma.mode = HDMI_AUDIO_TRANSF_DMA; 656}
663 audio_dma.fifo_threshold = 0x20; /* in number of samples */
664 657
665 hdmi_wp_audio_config_dma(ip_data, &audio_dma); 658int hdmi_audio_enable(void)
666 hdmi_wp_audio_config_format(ip_data, &audio_format); 659{
660 DSSDBG("audio_enable\n");
667 661
668 /* 662 return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
669 * I2S config 663}
670 */
671 core_cfg.i2s_cfg.en_high_bitrate_aud = false;
672 /* Only used with high bitrate audio */
673 core_cfg.i2s_cfg.cbit_order = false;
674 /* Serial data and word select should change on sck rising edge */
675 core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
676 core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
677 /* Set I2S word select polarity */
678 core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
679 core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
680 /* Set serial data to word select shift. See Phillips spec. */
681 core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
682 /* Enable one of the four available serial data channels */
683 core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
684
685 /* Core audio config */
686 core_cfg.freq_sample = sample_freq;
687 core_cfg.n = n;
688 core_cfg.cts = cts;
689 if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
690 core_cfg.aud_par_busclk = 0;
691 core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
692 core_cfg.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
693 } else {
694 core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
695 core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
696 core_cfg.use_mclk = true;
697 }
698 664
699 if (core_cfg.use_mclk) 665void hdmi_audio_disable(void)
700 core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS; 666{
701 core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH; 667 DSSDBG("audio_disable\n");
702 core_cfg.en_spdif = false;
703 /* Use sample frequency from channel status word */
704 core_cfg.fs_override = true;
705 /* Enable ACR packets */
706 core_cfg.en_acr_pkt = true;
707 /* Disable direct streaming digital audio */
708 core_cfg.en_dsd_audio = false;
709 /* Use parallel audio interface */
710 core_cfg.en_parallel_aud_input = true;
711
712 hdmi_core_audio_config(ip_data, &core_cfg);
713 668
714 /* 669 hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
715 * Configure packet
716 * info frame audio see doc CEA861-D page 74
717 */
718 aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
719 aud_if_cfg.db1_channel_count = 2;
720 aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
721 aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
722 aud_if_cfg.db4_channel_alloc = 0x00;
723 aud_if_cfg.db5_downmix_inh = false;
724 aud_if_cfg.db5_lsv = 0;
725
726 hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg);
727 return 0;
728} 670}
729 671
730static int hdmi_audio_startup(struct snd_pcm_substream *substream, 672int hdmi_audio_start(void)
731 struct snd_soc_dai *dai)
732{ 673{
733 if (!hdmi.ip_data.cfg.cm.mode) { 674 DSSDBG("audio_start\n");
734 pr_err("Current video settings do not support audio.\n"); 675
735 return -EIO; 676 return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
736 }
737 return 0;
738} 677}
739 678
740static int hdmi_audio_codec_probe(struct snd_soc_codec *codec) 679void hdmi_audio_stop(void)
741{ 680{
742 struct hdmi_ip_data *priv = &hdmi.ip_data; 681 DSSDBG("audio_stop\n");
743 682
744 snd_soc_codec_set_drvdata(codec, priv); 683 hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
745 return 0;
746} 684}
747 685
748static struct snd_soc_codec_driver hdmi_audio_codec_drv = { 686bool hdmi_mode_has_audio(void)
749 .probe = hdmi_audio_codec_probe, 687{
750}; 688 if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
689 return true;
690 else
691 return false;
692}
751 693
752static struct snd_soc_dai_ops hdmi_audio_codec_ops = { 694int hdmi_audio_config(struct omap_dss_audio *audio)
753 .hw_params = hdmi_audio_hw_params, 695{
754 .trigger = hdmi_audio_trigger, 696 return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
755 .startup = hdmi_audio_startup, 697}
756};
757 698
758static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
759 .name = "hdmi-audio-codec",
760 .playback = {
761 .channels_min = 2,
762 .channels_max = 2,
763 .rates = SNDRV_PCM_RATE_32000 |
764 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
765 .formats = SNDRV_PCM_FMTBIT_S16_LE |
766 SNDRV_PCM_FMTBIT_S24_LE,
767 },
768 .ops = &hdmi_audio_codec_ops,
769};
770#endif 699#endif
771 700
772static int hdmi_get_clocks(struct platform_device *pdev) 701static void __init hdmi_probe_pdata(struct platform_device *pdev)
773{ 702{
774 struct clk *clk; 703 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
704 int r, i;
775 705
776 clk = clk_get(&pdev->dev, "sys_clk"); 706 for (i = 0; i < pdata->num_devices; ++i) {
777 if (IS_ERR(clk)) { 707 struct omap_dss_device *dssdev = pdata->devices[i];
778 DSSERR("can't get sys_clk\n");
779 return PTR_ERR(clk);
780 }
781 708
782 hdmi.sys_clk = clk; 709 if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
710 continue;
783 711
784 return 0; 712 r = hdmi_init_display(dssdev);
785} 713 if (r) {
714 DSSERR("device %s init failed: %d\n", dssdev->name, r);
715 continue;
716 }
786 717
787static void hdmi_put_clocks(void) 718 r = omap_dss_register_device(dssdev, &pdev->dev, i);
788{ 719 if (r)
789 if (hdmi.sys_clk) 720 DSSERR("device %s register failed: %d\n",
790 clk_put(hdmi.sys_clk); 721 dssdev->name, r);
722 }
791} 723}
792 724
793/* HDMI HW IP initialisation */ 725/* HDMI HW IP initialisation */
794static int omapdss_hdmihw_probe(struct platform_device *pdev) 726static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
795{ 727{
796 struct resource *hdmi_mem; 728 struct resource *hdmi_mem;
797 int r; 729 int r;
798 730
799 hdmi.pdata = pdev->dev.platform_data;
800 hdmi.pdev = pdev; 731 hdmi.pdev = pdev;
801 732
802 mutex_init(&hdmi.lock); 733 mutex_init(&hdmi.lock);
@@ -830,28 +761,18 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
830 761
831 hdmi_panel_init(); 762 hdmi_panel_init();
832 763
833#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 764 dss_debugfs_create_file("hdmi", hdmi_dump_regs);
834 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 765
766 hdmi_probe_pdata(pdev);
835 767
836 /* Register ASoC codec DAI */
837 r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
838 &hdmi_codec_dai_drv, 1);
839 if (r) {
840 DSSERR("can't register ASoC HDMI audio codec\n");
841 return r;
842 }
843#endif
844 return 0; 768 return 0;
845} 769}
846 770
847static int omapdss_hdmihw_remove(struct platform_device *pdev) 771static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
848{ 772{
849 hdmi_panel_exit(); 773 omap_dss_unregister_child_devices(&pdev->dev);
850 774
851#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 775 hdmi_panel_exit();
852 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
853 snd_soc_unregister_codec(&pdev->dev);
854#endif
855 776
856 pm_runtime_disable(&pdev->dev); 777 pm_runtime_disable(&pdev->dev);
857 778
@@ -867,7 +788,6 @@ static int hdmi_runtime_suspend(struct device *dev)
867 clk_disable(hdmi.sys_clk); 788 clk_disable(hdmi.sys_clk);
868 789
869 dispc_runtime_put(); 790 dispc_runtime_put();
870 dss_runtime_put();
871 791
872 return 0; 792 return 0;
873} 793}
@@ -876,23 +796,13 @@ static int hdmi_runtime_resume(struct device *dev)
876{ 796{
877 int r; 797 int r;
878 798
879 r = dss_runtime_get();
880 if (r < 0)
881 goto err_get_dss;
882
883 r = dispc_runtime_get(); 799 r = dispc_runtime_get();
884 if (r < 0) 800 if (r < 0)
885 goto err_get_dispc; 801 return r;
886
887 802
888 clk_enable(hdmi.sys_clk); 803 clk_enable(hdmi.sys_clk);
889 804
890 return 0; 805 return 0;
891
892err_get_dispc:
893 dss_runtime_put();
894err_get_dss:
895 return r;
896} 806}
897 807
898static const struct dev_pm_ops hdmi_pm_ops = { 808static const struct dev_pm_ops hdmi_pm_ops = {
@@ -901,8 +811,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
901}; 811};
902 812
903static struct platform_driver omapdss_hdmihw_driver = { 813static struct platform_driver omapdss_hdmihw_driver = {
904 .probe = omapdss_hdmihw_probe, 814 .remove = __exit_p(omapdss_hdmihw_remove),
905 .remove = omapdss_hdmihw_remove,
906 .driver = { 815 .driver = {
907 .name = "omapdss_hdmi", 816 .name = "omapdss_hdmi",
908 .owner = THIS_MODULE, 817 .owner = THIS_MODULE,
@@ -910,12 +819,12 @@ static struct platform_driver omapdss_hdmihw_driver = {
910 }, 819 },
911}; 820};
912 821
913int hdmi_init_platform_driver(void) 822int __init hdmi_init_platform_driver(void)
914{ 823{
915 return platform_driver_register(&omapdss_hdmihw_driver); 824 return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
916} 825}
917 826
918void hdmi_uninit_platform_driver(void) 827void __exit hdmi_uninit_platform_driver(void)
919{ 828{
920 return platform_driver_unregister(&omapdss_hdmihw_driver); 829 platform_driver_unregister(&omapdss_hdmihw_driver);
921} 830}
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 533d5dc634d2..1179e3c4b1c7 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -30,7 +30,12 @@
30#include "dss.h" 30#include "dss.h"
31 31
32static struct { 32static struct {
33 struct mutex hdmi_lock; 33 /* This protects the panel ops, mainly when accessing the HDMI IP. */
34 struct mutex lock;
35#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
36 /* This protects the audio ops, specifically. */
37 spinlock_t audio_lock;
38#endif
34} hdmi; 39} hdmi;
35 40
36 41
@@ -54,12 +59,168 @@ static void hdmi_panel_remove(struct omap_dss_device *dssdev)
54 59
55} 60}
56 61
62#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
63static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
64{
65 unsigned long flags;
66 int r;
67
68 mutex_lock(&hdmi.lock);
69 spin_lock_irqsave(&hdmi.audio_lock, flags);
70
71 /* enable audio only if the display is active and supports audio */
72 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
73 !hdmi_mode_has_audio()) {
74 DSSERR("audio not supported or display is off\n");
75 r = -EPERM;
76 goto err;
77 }
78
79 r = hdmi_audio_enable();
80
81 if (!r)
82 dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
83
84err:
85 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
86 mutex_unlock(&hdmi.lock);
87 return r;
88}
89
90static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
91{
92 unsigned long flags;
93
94 spin_lock_irqsave(&hdmi.audio_lock, flags);
95
96 hdmi_audio_disable();
97
98 dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
99
100 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
101}
102
103static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
104{
105 unsigned long flags;
106 int r;
107
108 spin_lock_irqsave(&hdmi.audio_lock, flags);
109 /*
110 * No need to check the panel state. It was checked when trasitioning
111 * to AUDIO_ENABLED.
112 */
113 if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) {
114 DSSERR("audio start from invalid state\n");
115 r = -EPERM;
116 goto err;
117 }
118
119 r = hdmi_audio_start();
120
121 if (!r)
122 dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
123
124err:
125 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
126 return r;
127}
128
129static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
130{
131 unsigned long flags;
132
133 spin_lock_irqsave(&hdmi.audio_lock, flags);
134
135 hdmi_audio_stop();
136 dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
137
138 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
139}
140
141static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
142{
143 bool r = false;
144
145 mutex_lock(&hdmi.lock);
146
147 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
148 goto err;
149
150 if (!hdmi_mode_has_audio())
151 goto err;
152
153 r = true;
154err:
155 mutex_unlock(&hdmi.lock);
156 return r;
157}
158
159static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
160 struct omap_dss_audio *audio)
161{
162 unsigned long flags;
163 int r;
164
165 mutex_lock(&hdmi.lock);
166 spin_lock_irqsave(&hdmi.audio_lock, flags);
167
168 /* config audio only if the display is active and supports audio */
169 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
170 !hdmi_mode_has_audio()) {
171 DSSERR("audio not supported or display is off\n");
172 r = -EPERM;
173 goto err;
174 }
175
176 r = hdmi_audio_config(audio);
177
178 if (!r)
179 dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
180
181err:
182 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
183 mutex_unlock(&hdmi.lock);
184 return r;
185}
186
187#else
188static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
189{
190 return -EPERM;
191}
192
193static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
194{
195}
196
197static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
198{
199 return -EPERM;
200}
201
202static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
203{
204}
205
206static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
207{
208 return false;
209}
210
211static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
212 struct omap_dss_audio *audio)
213{
214 return -EPERM;
215}
216#endif
217
57static int hdmi_panel_enable(struct omap_dss_device *dssdev) 218static int hdmi_panel_enable(struct omap_dss_device *dssdev)
58{ 219{
59 int r = 0; 220 int r = 0;
60 DSSDBG("ENTER hdmi_panel_enable\n"); 221 DSSDBG("ENTER hdmi_panel_enable\n");
61 222
62 mutex_lock(&hdmi.hdmi_lock); 223 mutex_lock(&hdmi.lock);
63 224
64 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { 225 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
65 r = -EINVAL; 226 r = -EINVAL;
@@ -75,40 +236,52 @@ static int hdmi_panel_enable(struct omap_dss_device *dssdev)
75 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 236 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
76 237
77err: 238err:
78 mutex_unlock(&hdmi.hdmi_lock); 239 mutex_unlock(&hdmi.lock);
79 240
80 return r; 241 return r;
81} 242}
82 243
83static void hdmi_panel_disable(struct omap_dss_device *dssdev) 244static void hdmi_panel_disable(struct omap_dss_device *dssdev)
84{ 245{
85 mutex_lock(&hdmi.hdmi_lock); 246 mutex_lock(&hdmi.lock);
86 247
87 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 248 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
249 /*
250 * TODO: notify audio users that the display was disabled. For
251 * now, disable audio locally to not break our audio state
252 * machine.
253 */
254 hdmi_panel_audio_disable(dssdev);
88 omapdss_hdmi_display_disable(dssdev); 255 omapdss_hdmi_display_disable(dssdev);
256 }
89 257
90 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 258 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
91 259
92 mutex_unlock(&hdmi.hdmi_lock); 260 mutex_unlock(&hdmi.lock);
93} 261}
94 262
95static int hdmi_panel_suspend(struct omap_dss_device *dssdev) 263static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
96{ 264{
97 int r = 0; 265 int r = 0;
98 266
99 mutex_lock(&hdmi.hdmi_lock); 267 mutex_lock(&hdmi.lock);
100 268
101 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { 269 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
102 r = -EINVAL; 270 r = -EINVAL;
103 goto err; 271 goto err;
104 } 272 }
105 273
106 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; 274 /*
275 * TODO: notify audio users that the display was suspended. For now,
276 * disable audio locally to not break our audio state machine.
277 */
278 hdmi_panel_audio_disable(dssdev);
107 279
280 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
108 omapdss_hdmi_display_disable(dssdev); 281 omapdss_hdmi_display_disable(dssdev);
109 282
110err: 283err:
111 mutex_unlock(&hdmi.hdmi_lock); 284 mutex_unlock(&hdmi.lock);
112 285
113 return r; 286 return r;
114} 287}
@@ -117,7 +290,7 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
117{ 290{
118 int r = 0; 291 int r = 0;
119 292
120 mutex_lock(&hdmi.hdmi_lock); 293 mutex_lock(&hdmi.lock);
121 294
122 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) { 295 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
123 r = -EINVAL; 296 r = -EINVAL;
@@ -129,11 +302,12 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
129 DSSERR("failed to power on\n"); 302 DSSERR("failed to power on\n");
130 goto err; 303 goto err;
131 } 304 }
305 /* TODO: notify audio users that the panel resumed. */
132 306
133 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 307 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
134 308
135err: 309err:
136 mutex_unlock(&hdmi.hdmi_lock); 310 mutex_unlock(&hdmi.lock);
137 311
138 return r; 312 return r;
139} 313}
@@ -141,11 +315,11 @@ err:
141static void hdmi_get_timings(struct omap_dss_device *dssdev, 315static void hdmi_get_timings(struct omap_dss_device *dssdev,
142 struct omap_video_timings *timings) 316 struct omap_video_timings *timings)
143{ 317{
144 mutex_lock(&hdmi.hdmi_lock); 318 mutex_lock(&hdmi.lock);
145 319
146 *timings = dssdev->panel.timings; 320 *timings = dssdev->panel.timings;
147 321
148 mutex_unlock(&hdmi.hdmi_lock); 322 mutex_unlock(&hdmi.lock);
149} 323}
150 324
151static void hdmi_set_timings(struct omap_dss_device *dssdev, 325static void hdmi_set_timings(struct omap_dss_device *dssdev,
@@ -153,12 +327,18 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev,
153{ 327{
154 DSSDBG("hdmi_set_timings\n"); 328 DSSDBG("hdmi_set_timings\n");
155 329
156 mutex_lock(&hdmi.hdmi_lock); 330 mutex_lock(&hdmi.lock);
331
332 /*
333 * TODO: notify audio users that there was a timings change. For
334 * now, disable audio locally to not break our audio state machine.
335 */
336 hdmi_panel_audio_disable(dssdev);
157 337
158 dssdev->panel.timings = *timings; 338 dssdev->panel.timings = *timings;
159 omapdss_hdmi_display_set_timing(dssdev); 339 omapdss_hdmi_display_set_timing(dssdev);
160 340
161 mutex_unlock(&hdmi.hdmi_lock); 341 mutex_unlock(&hdmi.lock);
162} 342}
163 343
164static int hdmi_check_timings(struct omap_dss_device *dssdev, 344static int hdmi_check_timings(struct omap_dss_device *dssdev,
@@ -168,11 +348,11 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
168 348
169 DSSDBG("hdmi_check_timings\n"); 349 DSSDBG("hdmi_check_timings\n");
170 350
171 mutex_lock(&hdmi.hdmi_lock); 351 mutex_lock(&hdmi.lock);
172 352
173 r = omapdss_hdmi_display_check_timing(dssdev, timings); 353 r = omapdss_hdmi_display_check_timing(dssdev, timings);
174 354
175 mutex_unlock(&hdmi.hdmi_lock); 355 mutex_unlock(&hdmi.lock);
176 return r; 356 return r;
177} 357}
178 358
@@ -180,7 +360,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
180{ 360{
181 int r; 361 int r;
182 362
183 mutex_lock(&hdmi.hdmi_lock); 363 mutex_lock(&hdmi.lock);
184 364
185 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { 365 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
186 r = omapdss_hdmi_display_enable(dssdev); 366 r = omapdss_hdmi_display_enable(dssdev);
@@ -194,7 +374,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
194 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) 374 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
195 omapdss_hdmi_display_disable(dssdev); 375 omapdss_hdmi_display_disable(dssdev);
196err: 376err:
197 mutex_unlock(&hdmi.hdmi_lock); 377 mutex_unlock(&hdmi.lock);
198 378
199 return r; 379 return r;
200} 380}
@@ -203,7 +383,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
203{ 383{
204 int r; 384 int r;
205 385
206 mutex_lock(&hdmi.hdmi_lock); 386 mutex_lock(&hdmi.lock);
207 387
208 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { 388 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
209 r = omapdss_hdmi_display_enable(dssdev); 389 r = omapdss_hdmi_display_enable(dssdev);
@@ -217,7 +397,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
217 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) 397 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
218 omapdss_hdmi_display_disable(dssdev); 398 omapdss_hdmi_display_disable(dssdev);
219err: 399err:
220 mutex_unlock(&hdmi.hdmi_lock); 400 mutex_unlock(&hdmi.lock);
221 401
222 return r; 402 return r;
223} 403}
@@ -234,6 +414,12 @@ static struct omap_dss_driver hdmi_driver = {
234 .check_timings = hdmi_check_timings, 414 .check_timings = hdmi_check_timings,
235 .read_edid = hdmi_read_edid, 415 .read_edid = hdmi_read_edid,
236 .detect = hdmi_detect, 416 .detect = hdmi_detect,
417 .audio_enable = hdmi_panel_audio_enable,
418 .audio_disable = hdmi_panel_audio_disable,
419 .audio_start = hdmi_panel_audio_start,
420 .audio_stop = hdmi_panel_audio_stop,
421 .audio_supported = hdmi_panel_audio_supported,
422 .audio_config = hdmi_panel_audio_config,
237 .driver = { 423 .driver = {
238 .name = "hdmi_panel", 424 .name = "hdmi_panel",
239 .owner = THIS_MODULE, 425 .owner = THIS_MODULE,
@@ -242,7 +428,11 @@ static struct omap_dss_driver hdmi_driver = {
242 428
243int hdmi_panel_init(void) 429int hdmi_panel_init(void)
244{ 430{
245 mutex_init(&hdmi.hdmi_lock); 431 mutex_init(&hdmi.lock);
432
433#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
434 spin_lock_init(&hdmi.audio_lock);
435#endif
246 436
247 omap_dss_register_driver(&hdmi_driver); 437 omap_dss_register_driver(&hdmi_driver);
248 438
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index e7364603f6a1..0cbcde4c688a 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -654,9 +654,20 @@ static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
654 return 0; 654 return 0;
655} 655}
656 656
657int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
658 const struct omap_video_timings *timings)
659{
660 if (!dispc_mgr_timings_ok(mgr->id, timings)) {
661 DSSERR("check_manager: invalid timings\n");
662 return -EINVAL;
663 }
664
665 return 0;
666}
667
657int dss_mgr_check(struct omap_overlay_manager *mgr, 668int dss_mgr_check(struct omap_overlay_manager *mgr,
658 struct omap_dss_device *dssdev,
659 struct omap_overlay_manager_info *info, 669 struct omap_overlay_manager_info *info,
670 const struct omap_video_timings *mgr_timings,
660 struct omap_overlay_info **overlay_infos) 671 struct omap_overlay_info **overlay_infos)
661{ 672{
662 struct omap_overlay *ovl; 673 struct omap_overlay *ovl;
@@ -668,6 +679,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
668 return r; 679 return r;
669 } 680 }
670 681
682 r = dss_mgr_check_timings(mgr, mgr_timings);
683 if (r)
684 return r;
685
671 list_for_each_entry(ovl, &mgr->overlays, list) { 686 list_for_each_entry(ovl, &mgr->overlays, list) {
672 struct omap_overlay_info *oi; 687 struct omap_overlay_info *oi;
673 int r; 688 int r;
@@ -677,7 +692,7 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
677 if (oi == NULL) 692 if (oi == NULL)
678 continue; 693 continue;
679 694
680 r = dss_ovl_check(ovl, oi, dssdev); 695 r = dss_ovl_check(ovl, oi, mgr_timings);
681 if (r) 696 if (r)
682 return r; 697 return r;
683 } 698 }
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 6e821810deec..b0ba60f88dd2 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -628,19 +628,23 @@ int dss_ovl_simple_check(struct omap_overlay *ovl,
628 return -EINVAL; 628 return -EINVAL;
629 } 629 }
630 630
631 if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
632 DSSERR("check_overlay: rotation type %d not supported\n",
633 info->rotation_type);
634 return -EINVAL;
635 }
636
631 return 0; 637 return 0;
632} 638}
633 639
634int dss_ovl_check(struct omap_overlay *ovl, 640int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
635 struct omap_overlay_info *info, struct omap_dss_device *dssdev) 641 const struct omap_video_timings *mgr_timings)
636{ 642{
637 u16 outw, outh; 643 u16 outw, outh;
638 u16 dw, dh; 644 u16 dw, dh;
639 645
640 if (dssdev == NULL) 646 dw = mgr_timings->x_res;
641 return 0; 647 dh = mgr_timings->y_res;
642
643 dssdev->driver->get_resolution(dssdev, &dw, &dh);
644 648
645 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { 649 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
646 outw = info->width; 650 outw = info->width;
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 788a0ef6323a..3d8c206e90e5 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -304,13 +304,23 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
304 u16 height, void (*callback)(void *data), void *data) 304 u16 height, void (*callback)(void *data), void *data)
305{ 305{
306 u32 l; 306 u32 l;
307 struct omap_video_timings timings = {
308 .hsw = 1,
309 .hfp = 1,
310 .hbp = 1,
311 .vsw = 1,
312 .vfp = 0,
313 .vbp = 0,
314 .x_res = width,
315 .y_res = height,
316 };
307 317
308 /*BUG_ON(callback == 0);*/ 318 /*BUG_ON(callback == 0);*/
309 BUG_ON(rfbi.framedone_callback != NULL); 319 BUG_ON(rfbi.framedone_callback != NULL);
310 320
311 DSSDBG("rfbi_transfer_area %dx%d\n", width, height); 321 DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
312 322
313 dispc_mgr_set_lcd_size(dssdev->manager->id, width, height); 323 dss_mgr_set_timings(dssdev->manager, &timings);
314 324
315 dispc_mgr_enable(dssdev->manager->id, true); 325 dispc_mgr_enable(dssdev->manager->id, true);
316 326
@@ -766,6 +776,16 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
766 u16 *x, u16 *y, u16 *w, u16 *h) 776 u16 *x, u16 *y, u16 *w, u16 *h)
767{ 777{
768 u16 dw, dh; 778 u16 dw, dh;
779 struct omap_video_timings timings = {
780 .hsw = 1,
781 .hfp = 1,
782 .hbp = 1,
783 .vsw = 1,
784 .vfp = 0,
785 .vbp = 0,
786 .x_res = *w,
787 .y_res = *h,
788 };
769 789
770 dssdev->driver->get_resolution(dssdev, &dw, &dh); 790 dssdev->driver->get_resolution(dssdev, &dw, &dh);
771 791
@@ -784,7 +804,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
784 if (*w == 0 || *h == 0) 804 if (*w == 0 || *h == 0)
785 return -EINVAL; 805 return -EINVAL;
786 806
787 dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h); 807 dss_mgr_set_timings(dssdev->manager, &timings);
788 808
789 return 0; 809 return 0;
790} 810}
@@ -799,7 +819,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
799} 819}
800EXPORT_SYMBOL(omap_rfbi_update); 820EXPORT_SYMBOL(omap_rfbi_update);
801 821
802void rfbi_dump_regs(struct seq_file *s) 822static void rfbi_dump_regs(struct seq_file *s)
803{ 823{
804#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) 824#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
805 825
@@ -900,15 +920,39 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
900} 920}
901EXPORT_SYMBOL(omapdss_rfbi_display_disable); 921EXPORT_SYMBOL(omapdss_rfbi_display_disable);
902 922
903int rfbi_init_display(struct omap_dss_device *dssdev) 923static int __init rfbi_init_display(struct omap_dss_device *dssdev)
904{ 924{
905 rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev; 925 rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
906 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; 926 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
907 return 0; 927 return 0;
908} 928}
909 929
930static void __init rfbi_probe_pdata(struct platform_device *pdev)
931{
932 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
933 int i, r;
934
935 for (i = 0; i < pdata->num_devices; ++i) {
936 struct omap_dss_device *dssdev = pdata->devices[i];
937
938 if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
939 continue;
940
941 r = rfbi_init_display(dssdev);
942 if (r) {
943 DSSERR("device %s init failed: %d\n", dssdev->name, r);
944 continue;
945 }
946
947 r = omap_dss_register_device(dssdev, &pdev->dev, i);
948 if (r)
949 DSSERR("device %s register failed: %d\n",
950 dssdev->name, r);
951 }
952}
953
910/* RFBI HW IP initialisation */ 954/* RFBI HW IP initialisation */
911static int omap_rfbihw_probe(struct platform_device *pdev) 955static int __init omap_rfbihw_probe(struct platform_device *pdev)
912{ 956{
913 u32 rev; 957 u32 rev;
914 struct resource *rfbi_mem; 958 struct resource *rfbi_mem;
@@ -956,6 +1000,10 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
956 1000
957 rfbi_runtime_put(); 1001 rfbi_runtime_put();
958 1002
1003 dss_debugfs_create_file("rfbi", rfbi_dump_regs);
1004
1005 rfbi_probe_pdata(pdev);
1006
959 return 0; 1007 return 0;
960 1008
961err_runtime_get: 1009err_runtime_get:
@@ -963,8 +1011,9 @@ err_runtime_get:
963 return r; 1011 return r;
964} 1012}
965 1013
966static int omap_rfbihw_remove(struct platform_device *pdev) 1014static int __exit omap_rfbihw_remove(struct platform_device *pdev)
967{ 1015{
1016 omap_dss_unregister_child_devices(&pdev->dev);
968 pm_runtime_disable(&pdev->dev); 1017 pm_runtime_disable(&pdev->dev);
969 return 0; 1018 return 0;
970} 1019}
@@ -972,7 +1021,6 @@ static int omap_rfbihw_remove(struct platform_device *pdev)
972static int rfbi_runtime_suspend(struct device *dev) 1021static int rfbi_runtime_suspend(struct device *dev)
973{ 1022{
974 dispc_runtime_put(); 1023 dispc_runtime_put();
975 dss_runtime_put();
976 1024
977 return 0; 1025 return 0;
978} 1026}
@@ -981,20 +1029,11 @@ static int rfbi_runtime_resume(struct device *dev)
981{ 1029{
982 int r; 1030 int r;
983 1031
984 r = dss_runtime_get();
985 if (r < 0)
986 goto err_get_dss;
987
988 r = dispc_runtime_get(); 1032 r = dispc_runtime_get();
989 if (r < 0) 1033 if (r < 0)
990 goto err_get_dispc; 1034 return r;
991 1035
992 return 0; 1036 return 0;
993
994err_get_dispc:
995 dss_runtime_put();
996err_get_dss:
997 return r;
998} 1037}
999 1038
1000static const struct dev_pm_ops rfbi_pm_ops = { 1039static const struct dev_pm_ops rfbi_pm_ops = {
@@ -1003,8 +1042,7 @@ static const struct dev_pm_ops rfbi_pm_ops = {
1003}; 1042};
1004 1043
1005static struct platform_driver omap_rfbihw_driver = { 1044static struct platform_driver omap_rfbihw_driver = {
1006 .probe = omap_rfbihw_probe, 1045 .remove = __exit_p(omap_rfbihw_remove),
1007 .remove = omap_rfbihw_remove,
1008 .driver = { 1046 .driver = {
1009 .name = "omapdss_rfbi", 1047 .name = "omapdss_rfbi",
1010 .owner = THIS_MODULE, 1048 .owner = THIS_MODULE,
@@ -1012,12 +1050,12 @@ static struct platform_driver omap_rfbihw_driver = {
1012 }, 1050 },
1013}; 1051};
1014 1052
1015int rfbi_init_platform_driver(void) 1053int __init rfbi_init_platform_driver(void)
1016{ 1054{
1017 return platform_driver_register(&omap_rfbihw_driver); 1055 return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
1018} 1056}
1019 1057
1020void rfbi_uninit_platform_driver(void) 1058void __exit rfbi_uninit_platform_driver(void)
1021{ 1059{
1022 return platform_driver_unregister(&omap_rfbihw_driver); 1060 platform_driver_unregister(&omap_rfbihw_driver);
1023} 1061}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 8266ca0d666b..3a43dc2a9b46 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -24,6 +24,7 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
26#include <linux/export.h> 26#include <linux/export.h>
27#include <linux/platform_device.h>
27 28
28#include <video/omapdss.h> 29#include <video/omapdss.h>
29#include "dss.h" 30#include "dss.h"
@@ -71,10 +72,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
71 if (r) 72 if (r)
72 goto err_reg_enable; 73 goto err_reg_enable;
73 74
74 r = dss_runtime_get();
75 if (r)
76 goto err_get_dss;
77
78 r = dispc_runtime_get(); 75 r = dispc_runtime_get();
79 if (r) 76 if (r)
80 goto err_get_dispc; 77 goto err_get_dispc;
@@ -107,7 +104,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
107 } 104 }
108 105
109 106
110 dispc_mgr_set_lcd_timings(dssdev->manager->id, t); 107 dss_mgr_set_timings(dssdev->manager, t);
111 108
112 r = dss_set_clock_div(&dss_cinfo); 109 r = dss_set_clock_div(&dss_cinfo);
113 if (r) 110 if (r)
@@ -137,8 +134,6 @@ err_set_dss_clock_div:
137err_calc_clock_div: 134err_calc_clock_div:
138 dispc_runtime_put(); 135 dispc_runtime_put();
139err_get_dispc: 136err_get_dispc:
140 dss_runtime_put();
141err_get_dss:
142 regulator_disable(sdi.vdds_sdi_reg); 137 regulator_disable(sdi.vdds_sdi_reg);
143err_reg_enable: 138err_reg_enable:
144 omap_dss_stop_device(dssdev); 139 omap_dss_stop_device(dssdev);
@@ -154,7 +149,6 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
154 dss_sdi_disable(); 149 dss_sdi_disable();
155 150
156 dispc_runtime_put(); 151 dispc_runtime_put();
157 dss_runtime_put();
158 152
159 regulator_disable(sdi.vdds_sdi_reg); 153 regulator_disable(sdi.vdds_sdi_reg);
160 154
@@ -162,7 +156,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
162} 156}
163EXPORT_SYMBOL(omapdss_sdi_display_disable); 157EXPORT_SYMBOL(omapdss_sdi_display_disable);
164 158
165int sdi_init_display(struct omap_dss_device *dssdev) 159static int __init sdi_init_display(struct omap_dss_device *dssdev)
166{ 160{
167 DSSDBG("SDI init\n"); 161 DSSDBG("SDI init\n");
168 162
@@ -182,11 +176,58 @@ int sdi_init_display(struct omap_dss_device *dssdev)
182 return 0; 176 return 0;
183} 177}
184 178
185int sdi_init(void) 179static void __init sdi_probe_pdata(struct platform_device *pdev)
180{
181 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
182 int i, r;
183
184 for (i = 0; i < pdata->num_devices; ++i) {
185 struct omap_dss_device *dssdev = pdata->devices[i];
186
187 if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
188 continue;
189
190 r = sdi_init_display(dssdev);
191 if (r) {
192 DSSERR("device %s init failed: %d\n", dssdev->name, r);
193 continue;
194 }
195
196 r = omap_dss_register_device(dssdev, &pdev->dev, i);
197 if (r)
198 DSSERR("device %s register failed: %d\n",
199 dssdev->name, r);
200 }
201}
202
203static int __init omap_sdi_probe(struct platform_device *pdev)
186{ 204{
205 sdi_probe_pdata(pdev);
206
207 return 0;
208}
209
210static int __exit omap_sdi_remove(struct platform_device *pdev)
211{
212 omap_dss_unregister_child_devices(&pdev->dev);
213
187 return 0; 214 return 0;
188} 215}
189 216
190void sdi_exit(void) 217static struct platform_driver omap_sdi_driver = {
218 .remove = __exit_p(omap_sdi_remove),
219 .driver = {
220 .name = "omapdss_sdi",
221 .owner = THIS_MODULE,
222 },
223};
224
225int __init sdi_init_platform_driver(void)
226{
227 return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
228}
229
230void __exit sdi_uninit_platform_driver(void)
191{ 231{
232 platform_driver_unregister(&omap_sdi_driver);
192} 233}
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index 1f58b84d6901..e734cb444bc7 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -96,7 +96,9 @@ struct ti_hdmi_ip_ops {
96 96
97 void (*pll_disable)(struct hdmi_ip_data *ip_data); 97 void (*pll_disable)(struct hdmi_ip_data *ip_data);
98 98
99 void (*video_enable)(struct hdmi_ip_data *ip_data, bool start); 99 int (*video_enable)(struct hdmi_ip_data *ip_data);
100
101 void (*video_disable)(struct hdmi_ip_data *ip_data);
100 102
101 void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s); 103 void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
102 104
@@ -106,9 +108,17 @@ struct ti_hdmi_ip_ops {
106 108
107 void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s); 109 void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
108 110
109#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 111#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
110 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 112 int (*audio_enable)(struct hdmi_ip_data *ip_data);
111 void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start); 113
114 void (*audio_disable)(struct hdmi_ip_data *ip_data);
115
116 int (*audio_start)(struct hdmi_ip_data *ip_data);
117
118 void (*audio_stop)(struct hdmi_ip_data *ip_data);
119
120 int (*audio_config)(struct hdmi_ip_data *ip_data,
121 struct omap_dss_audio *audio);
112#endif 122#endif
113 123
114}; 124};
@@ -173,7 +183,8 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
173void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data); 183void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
174int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len); 184int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
175bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data); 185bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
176void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start); 186int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
187void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
177int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data); 188int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
178void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data); 189void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
179void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data); 190void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
@@ -181,8 +192,13 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
181void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); 192void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
182void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); 193void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
183void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); 194void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
184#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 195#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
185 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 196int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
186void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable); 197int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
198void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
199int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
200void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
201int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
202 struct omap_dss_audio *audio);
187#endif 203#endif
188#endif 204#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index bfe6fe65c8be..4dae1b291079 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -29,9 +29,14 @@
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
33#include <sound/asound.h>
34#include <sound/asoundef.h>
35#endif
32 36
33#include "ti_hdmi_4xxx_ip.h" 37#include "ti_hdmi_4xxx_ip.h"
34#include "dss.h" 38#include "dss.h"
39#include "dss_features.h"
35 40
36static inline void hdmi_write_reg(void __iomem *base_addr, 41static inline void hdmi_write_reg(void __iomem *base_addr,
37 const u16 idx, u32 val) 42 const u16 idx, u32 val)
@@ -298,9 +303,9 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
298 REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27); 303 REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
299 304
300 r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio), 305 r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
301 NULL, hpd_irq_handler, 306 NULL, hpd_irq_handler,
302 IRQF_DISABLED | IRQF_TRIGGER_RISING | 307 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
303 IRQF_TRIGGER_FALLING, "hpd", ip_data); 308 IRQF_ONESHOT, "hpd", ip_data);
304 if (r) { 309 if (r) {
305 DSSERR("HPD IRQ request failed\n"); 310 DSSERR("HPD IRQ request failed\n");
306 hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); 311 hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
@@ -699,9 +704,15 @@ static void hdmi_wp_init(struct omap_video_timings *timings,
699 704
700} 705}
701 706
702void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start) 707int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
708{
709 REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
710 return 0;
711}
712
713void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
703{ 714{
704 REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31); 715 REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
705} 716}
706 717
707static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt, 718static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
@@ -886,10 +897,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
886 897
887#define CORE_REG(i, name) name(i) 898#define CORE_REG(i, name) name(i)
888#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\ 899#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
889 hdmi_read_reg(hdmi_pll_base(ip_data), r)) 900 hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
890#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \ 901#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
902 hdmi_read_reg(hdmi_av_base(ip_data), r))
903#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
891 (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \ 904 (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
892 hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r))) 905 hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
893 906
894 DUMPCORE(HDMI_CORE_SYS_VND_IDL); 907 DUMPCORE(HDMI_CORE_SYS_VND_IDL);
895 DUMPCORE(HDMI_CORE_SYS_DEV_IDL); 908 DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
@@ -898,6 +911,13 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
898 DUMPCORE(HDMI_CORE_SYS_SRST); 911 DUMPCORE(HDMI_CORE_SYS_SRST);
899 DUMPCORE(HDMI_CORE_CTRL1); 912 DUMPCORE(HDMI_CORE_CTRL1);
900 DUMPCORE(HDMI_CORE_SYS_SYS_STAT); 913 DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
914 DUMPCORE(HDMI_CORE_SYS_DE_DLY);
915 DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
916 DUMPCORE(HDMI_CORE_SYS_DE_TOP);
917 DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
918 DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
919 DUMPCORE(HDMI_CORE_SYS_DE_LINL);
920 DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
901 DUMPCORE(HDMI_CORE_SYS_VID_ACEN); 921 DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
902 DUMPCORE(HDMI_CORE_SYS_VID_MODE); 922 DUMPCORE(HDMI_CORE_SYS_VID_MODE);
903 DUMPCORE(HDMI_CORE_SYS_INTR_STATE); 923 DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
@@ -907,102 +927,91 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
907 DUMPCORE(HDMI_CORE_SYS_INTR4); 927 DUMPCORE(HDMI_CORE_SYS_INTR4);
908 DUMPCORE(HDMI_CORE_SYS_UMASK1); 928 DUMPCORE(HDMI_CORE_SYS_UMASK1);
909 DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL); 929 DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
910 DUMPCORE(HDMI_CORE_SYS_DE_DLY);
911 DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
912 DUMPCORE(HDMI_CORE_SYS_DE_TOP);
913 DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
914 DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
915 DUMPCORE(HDMI_CORE_SYS_DE_LINL);
916 DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
917 930
918 DUMPCORE(HDMI_CORE_DDC_CMD);
919 DUMPCORE(HDMI_CORE_DDC_STATUS);
920 DUMPCORE(HDMI_CORE_DDC_ADDR); 931 DUMPCORE(HDMI_CORE_DDC_ADDR);
932 DUMPCORE(HDMI_CORE_DDC_SEGM);
921 DUMPCORE(HDMI_CORE_DDC_OFFSET); 933 DUMPCORE(HDMI_CORE_DDC_OFFSET);
922 DUMPCORE(HDMI_CORE_DDC_COUNT1); 934 DUMPCORE(HDMI_CORE_DDC_COUNT1);
923 DUMPCORE(HDMI_CORE_DDC_COUNT2); 935 DUMPCORE(HDMI_CORE_DDC_COUNT2);
936 DUMPCORE(HDMI_CORE_DDC_STATUS);
937 DUMPCORE(HDMI_CORE_DDC_CMD);
924 DUMPCORE(HDMI_CORE_DDC_DATA); 938 DUMPCORE(HDMI_CORE_DDC_DATA);
925 DUMPCORE(HDMI_CORE_DDC_SEGM);
926 939
927 DUMPCORE(HDMI_CORE_AV_HDMI_CTRL); 940 DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
928 DUMPCORE(HDMI_CORE_AV_DPD); 941 DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
929 DUMPCORE(HDMI_CORE_AV_PB_CTRL1); 942 DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
930 DUMPCORE(HDMI_CORE_AV_PB_CTRL2); 943 DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
931 DUMPCORE(HDMI_CORE_AV_AVI_TYPE); 944 DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
932 DUMPCORE(HDMI_CORE_AV_AVI_VERS); 945 DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
933 DUMPCORE(HDMI_CORE_AV_AVI_LEN); 946 DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
934 DUMPCORE(HDMI_CORE_AV_AVI_CHSUM); 947 DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
948 DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
949 DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
950 DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
951 DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
952 DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
953 DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
954 DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
955 DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
956 DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
957 DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
958 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
959 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
960 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
961 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
962 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
963 DUMPCOREAV(HDMI_CORE_AV_ASRC);
964 DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
965 DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
966 DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
967 DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
968 DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
969 DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
970 DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
971 DUMPCOREAV(HDMI_CORE_AV_DPD);
972 DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
973 DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
974 DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
975 DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
976 DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
977 DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
935 978
936 for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++) 979 for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
937 DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE); 980 DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
981
982 DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
983 DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
984 DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
985 DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
938 986
939 for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++) 987 for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
940 DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE); 988 DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
989
990 DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
991 DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
992 DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
993 DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
941 994
942 for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++) 995 for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
943 DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE); 996 DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
997
998 DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
999 DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
1000 DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
1001 DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
944 1002
945 for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++) 1003 for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
946 DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE); 1004 DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
947 1005
948 for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++) 1006 for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
949 DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE); 1007 DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
1008
1009 DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
950 1010
951 for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++) 1011 for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
952 DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE); 1012 DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
953 1013
954 DUMPCORE(HDMI_CORE_AV_ACR_CTRL); 1014 DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
955 DUMPCORE(HDMI_CORE_AV_FREQ_SVAL);
956 DUMPCORE(HDMI_CORE_AV_N_SVAL1);
957 DUMPCORE(HDMI_CORE_AV_N_SVAL2);
958 DUMPCORE(HDMI_CORE_AV_N_SVAL3);
959 DUMPCORE(HDMI_CORE_AV_CTS_SVAL1);
960 DUMPCORE(HDMI_CORE_AV_CTS_SVAL2);
961 DUMPCORE(HDMI_CORE_AV_CTS_SVAL3);
962 DUMPCORE(HDMI_CORE_AV_CTS_HVAL1);
963 DUMPCORE(HDMI_CORE_AV_CTS_HVAL2);
964 DUMPCORE(HDMI_CORE_AV_CTS_HVAL3);
965 DUMPCORE(HDMI_CORE_AV_AUD_MODE);
966 DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL);
967 DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS);
968 DUMPCORE(HDMI_CORE_AV_SWAP_I2S);
969 DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH);
970 DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP);
971 DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL);
972 DUMPCORE(HDMI_CORE_AV_I2S_CHST0);
973 DUMPCORE(HDMI_CORE_AV_I2S_CHST1);
974 DUMPCORE(HDMI_CORE_AV_I2S_CHST2);
975 DUMPCORE(HDMI_CORE_AV_I2S_CHST4);
976 DUMPCORE(HDMI_CORE_AV_I2S_CHST5);
977 DUMPCORE(HDMI_CORE_AV_ASRC);
978 DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN);
979 DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
980 DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT);
981 DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
982 DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
983 DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
984 DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL);
985 DUMPCORE(HDMI_CORE_AV_DPD);
986 DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
987 DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
988 DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
989 DUMPCORE(HDMI_CORE_AV_AVI_VERS);
990 DUMPCORE(HDMI_CORE_AV_AVI_LEN);
991 DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
992 DUMPCORE(HDMI_CORE_AV_SPD_TYPE);
993 DUMPCORE(HDMI_CORE_AV_SPD_VERS);
994 DUMPCORE(HDMI_CORE_AV_SPD_LEN);
995 DUMPCORE(HDMI_CORE_AV_SPD_CHSUM);
996 DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE);
997 DUMPCORE(HDMI_CORE_AV_AUDIO_VERS);
998 DUMPCORE(HDMI_CORE_AV_AUDIO_LEN);
999 DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM);
1000 DUMPCORE(HDMI_CORE_AV_MPEG_TYPE);
1001 DUMPCORE(HDMI_CORE_AV_MPEG_VERS);
1002 DUMPCORE(HDMI_CORE_AV_MPEG_LEN);
1003 DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM);
1004 DUMPCORE(HDMI_CORE_AV_CP_BYTE1);
1005 DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID);
1006} 1015}
1007 1016
1008void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s) 1017void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
@@ -1016,9 +1025,8 @@ void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
1016 DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL); 1025 DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
1017} 1026}
1018 1027
1019#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 1028#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
1020 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 1029static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
1021void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
1022 struct hdmi_audio_format *aud_fmt) 1030 struct hdmi_audio_format *aud_fmt)
1023{ 1031{
1024 u32 r; 1032 u32 r;
@@ -1037,7 +1045,7 @@ void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
1037 hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r); 1045 hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
1038} 1046}
1039 1047
1040void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data, 1048static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
1041 struct hdmi_audio_dma *aud_dma) 1049 struct hdmi_audio_dma *aud_dma)
1042{ 1050{
1043 u32 r; 1051 u32 r;
@@ -1055,7 +1063,7 @@ void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
1055 hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r); 1063 hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
1056} 1064}
1057 1065
1058void hdmi_core_audio_config(struct hdmi_ip_data *ip_data, 1066static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
1059 struct hdmi_core_audio_config *cfg) 1067 struct hdmi_core_audio_config *cfg)
1060{ 1068{
1061 u32 r; 1069 u32 r;
@@ -1106,27 +1114,33 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
1106 REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL, 1114 REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
1107 cfg->fs_override, 1, 1); 1115 cfg->fs_override, 1, 1);
1108 1116
1109 /* I2S parameters */ 1117 /*
1110 REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4, 1118 * Set IEC-60958-3 channel status word. It is passed to the IP
1111 cfg->freq_sample, 3, 0); 1119 * just as it is received. The user of the driver is responsible
1112 1120 * for its contents.
1121 */
1122 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
1123 cfg->iec60958_cfg->status[0]);
1124 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
1125 cfg->iec60958_cfg->status[1]);
1126 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
1127 cfg->iec60958_cfg->status[2]);
1128 /* yes, this is correct: status[3] goes to CHST4 register */
1129 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
1130 cfg->iec60958_cfg->status[3]);
1131 /* yes, this is correct: status[4] goes to CHST5 register */
1132 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
1133 cfg->iec60958_cfg->status[4]);
1134
1135 /* set I2S parameters */
1113 r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL); 1136 r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
1114 r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
1115 r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6); 1137 r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
1116 r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
1117 r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4); 1138 r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
1118 r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
1119 r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2); 1139 r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
1120 r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1); 1140 r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
1121 r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0); 1141 r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
1122 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r); 1142 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
1123 1143
1124 r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5);
1125 r = FLD_MOD(r, cfg->freq_sample, 7, 4);
1126 r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
1127 r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
1128 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r);
1129
1130 REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN, 1144 REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
1131 cfg->i2s_cfg.in_length_bits, 3, 0); 1145 cfg->i2s_cfg.in_length_bits, 3, 0);
1132 1146
@@ -1138,12 +1152,19 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
1138 r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2); 1152 r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
1139 r = FLD_MOD(r, cfg->en_spdif, 1, 1); 1153 r = FLD_MOD(r, cfg->en_spdif, 1, 1);
1140 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r); 1154 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
1155
1156 /* Audio channel mappings */
1157 /* TODO: Make channel mapping dynamic. For now, map channels
1158 * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
1159 * HDMI speaker order is different. See CEA-861 Section 6.6.2.
1160 */
1161 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
1162 REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
1141} 1163}
1142 1164
1143void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data, 1165static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
1144 struct hdmi_core_infoframe_audio *info_aud) 1166 struct snd_cea_861_aud_if *info_aud)
1145{ 1167{
1146 u8 val;
1147 u8 sum = 0, checksum = 0; 1168 u8 sum = 0, checksum = 0;
1148 void __iomem *av_base = hdmi_av_base(ip_data); 1169 void __iomem *av_base = hdmi_av_base(ip_data);
1149 1170
@@ -1157,24 +1178,23 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
1157 hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a); 1178 hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
1158 sum += 0x84 + 0x001 + 0x00a; 1179 sum += 0x84 + 0x001 + 0x00a;
1159 1180
1160 val = (info_aud->db1_coding_type << 4) 1181 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
1161 | (info_aud->db1_channel_count - 1); 1182 info_aud->db1_ct_cc);
1162 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val); 1183 sum += info_aud->db1_ct_cc;
1163 sum += val;
1164 1184
1165 val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size; 1185 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
1166 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val); 1186 info_aud->db2_sf_ss);
1167 sum += val; 1187 sum += info_aud->db2_sf_ss;
1168 1188
1169 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00); 1189 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
1190 sum += info_aud->db3;
1170 1191
1171 val = info_aud->db4_channel_alloc; 1192 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
1172 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val); 1193 sum += info_aud->db4_ca;
1173 sum += val;
1174 1194
1175 val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3); 1195 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
1176 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val); 1196 info_aud->db5_dminh_lsv);
1177 sum += val; 1197 sum += info_aud->db5_dminh_lsv;
1178 1198
1179 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00); 1199 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
1180 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00); 1200 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
@@ -1192,70 +1212,212 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
1192 */ 1212 */
1193} 1213}
1194 1214
1195int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data, 1215int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
1196 u32 sample_freq, u32 *n, u32 *cts) 1216 struct omap_dss_audio *audio)
1197{ 1217{
1198 u32 r; 1218 struct hdmi_audio_format audio_format;
1199 u32 deep_color = 0; 1219 struct hdmi_audio_dma audio_dma;
1200 u32 pclk = ip_data->cfg.timings.pixel_clock; 1220 struct hdmi_core_audio_config core;
1201 1221 int err, n, cts, channel_count;
1202 if (n == NULL || cts == NULL) 1222 unsigned int fs_nr;
1223 bool word_length_16b = false;
1224
1225 if (!audio || !audio->iec || !audio->cea || !ip_data)
1203 return -EINVAL; 1226 return -EINVAL;
1227
1228 core.iec60958_cfg = audio->iec;
1204 /* 1229 /*
1205 * Obtain current deep color configuration. This needed 1230 * In the IEC-60958 status word, check if the audio sample word length
1206 * to calculate the TMDS clock based on the pixel clock. 1231 * is 16-bit as several optimizations can be performed in such case.
1207 */ 1232 */
1208 r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0); 1233 if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
1209 switch (r) { 1234 if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
1210 case 1: /* No deep color selected */ 1235 word_length_16b = true;
1211 deep_color = 100; 1236
1237 /* I2S configuration. See Phillips' specification */
1238 if (word_length_16b)
1239 core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
1240 else
1241 core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
1242 /*
1243 * The I2S input word length is twice the lenght given in the IEC-60958
1244 * status word. If the word size is greater than
1245 * 20 bits, increment by one.
1246 */
1247 core.i2s_cfg.in_length_bits = audio->iec->status[4]
1248 & IEC958_AES4_CON_WORDLEN;
1249 if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
1250 core.i2s_cfg.in_length_bits++;
1251 core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
1252 core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
1253 core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
1254 core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
1255
1256 /* convert sample frequency to a number */
1257 switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
1258 case IEC958_AES3_CON_FS_32000:
1259 fs_nr = 32000;
1260 break;
1261 case IEC958_AES3_CON_FS_44100:
1262 fs_nr = 44100;
1263 break;
1264 case IEC958_AES3_CON_FS_48000:
1265 fs_nr = 48000;
1212 break; 1266 break;
1213 case 2: /* 10-bit deep color selected */ 1267 case IEC958_AES3_CON_FS_88200:
1214 deep_color = 125; 1268 fs_nr = 88200;
1215 break; 1269 break;
1216 case 3: /* 12-bit deep color selected */ 1270 case IEC958_AES3_CON_FS_96000:
1217 deep_color = 150; 1271 fs_nr = 96000;
1272 break;
1273 case IEC958_AES3_CON_FS_176400:
1274 fs_nr = 176400;
1275 break;
1276 case IEC958_AES3_CON_FS_192000:
1277 fs_nr = 192000;
1218 break; 1278 break;
1219 default: 1279 default:
1220 return -EINVAL; 1280 return -EINVAL;
1221 } 1281 }
1222 1282
1223 switch (sample_freq) { 1283 err = hdmi_compute_acr(fs_nr, &n, &cts);
1224 case 32000: 1284
1225 if ((deep_color == 125) && ((pclk == 54054) 1285 /* Audio clock regeneration settings */
1226 || (pclk == 74250))) 1286 core.n = n;
1227 *n = 8192; 1287 core.cts = cts;
1228 else 1288 if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
1229 *n = 4096; 1289 core.aud_par_busclk = 0;
1290 core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
1291 core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
1292 } else {
1293 core.aud_par_busclk = (((128 * 31) - 1) << 8);
1294 core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
1295 core.use_mclk = true;
1296 }
1297
1298 if (core.use_mclk)
1299 core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
1300
1301 /* Audio channels settings */
1302 channel_count = (audio->cea->db1_ct_cc &
1303 CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
1304
1305 switch (channel_count) {
1306 case 2:
1307 audio_format.active_chnnls_msk = 0x03;
1308 break;
1309 case 3:
1310 audio_format.active_chnnls_msk = 0x07;
1311 break;
1312 case 4:
1313 audio_format.active_chnnls_msk = 0x0f;
1314 break;
1315 case 5:
1316 audio_format.active_chnnls_msk = 0x1f;
1230 break; 1317 break;
1231 case 44100: 1318 case 6:
1232 *n = 6272; 1319 audio_format.active_chnnls_msk = 0x3f;
1233 break; 1320 break;
1234 case 48000: 1321 case 7:
1235 if ((deep_color == 125) && ((pclk == 54054) 1322 audio_format.active_chnnls_msk = 0x7f;
1236 || (pclk == 74250))) 1323 break;
1237 *n = 8192; 1324 case 8:
1238 else 1325 audio_format.active_chnnls_msk = 0xff;
1239 *n = 6144;
1240 break; 1326 break;
1241 default: 1327 default:
1242 *n = 0;
1243 return -EINVAL; 1328 return -EINVAL;
1244 } 1329 }
1245 1330
1246 /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */ 1331 /*
1247 *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10); 1332 * the HDMI IP needs to enable four stereo channels when transmitting
1333 * more than 2 audio channels
1334 */
1335 if (channel_count == 2) {
1336 audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
1337 core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
1338 core.layout = HDMI_AUDIO_LAYOUT_2CH;
1339 } else {
1340 audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
1341 core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
1342 HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
1343 HDMI_AUDIO_I2S_SD3_EN;
1344 core.layout = HDMI_AUDIO_LAYOUT_8CH;
1345 }
1346
1347 core.en_spdif = false;
1348 /* use sample frequency from channel status word */
1349 core.fs_override = true;
1350 /* enable ACR packets */
1351 core.en_acr_pkt = true;
1352 /* disable direct streaming digital audio */
1353 core.en_dsd_audio = false;
1354 /* use parallel audio interface */
1355 core.en_parallel_aud_input = true;
1356
1357 /* DMA settings */
1358 if (word_length_16b)
1359 audio_dma.transfer_size = 0x10;
1360 else
1361 audio_dma.transfer_size = 0x20;
1362 audio_dma.block_size = 0xC0;
1363 audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
1364 audio_dma.fifo_threshold = 0x20; /* in number of samples */
1365
1366 /* audio FIFO format settings */
1367 if (word_length_16b) {
1368 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
1369 audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
1370 audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
1371 } else {
1372 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
1373 audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
1374 audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
1375 }
1376 audio_format.type = HDMI_AUDIO_TYPE_LPCM;
1377 audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
1378 /* disable start/stop signals of IEC 60958 blocks */
1379 audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
1380
1381 /* configure DMA and audio FIFO format*/
1382 ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
1383 ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
1384
1385 /* configure the core*/
1386 ti_hdmi_4xxx_core_audio_config(ip_data, &core);
1387
1388 /* configure CEA 861 audio infoframe*/
1389 ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
1248 1390
1249 return 0; 1391 return 0;
1250} 1392}
1251 1393
1252void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable) 1394int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
1395{
1396 REG_FLD_MOD(hdmi_wp_base(ip_data),
1397 HDMI_WP_AUDIO_CTRL, true, 31, 31);
1398 return 0;
1399}
1400
1401void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
1402{
1403 REG_FLD_MOD(hdmi_wp_base(ip_data),
1404 HDMI_WP_AUDIO_CTRL, false, 31, 31);
1405}
1406
1407int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
1253{ 1408{
1254 REG_FLD_MOD(hdmi_av_base(ip_data), 1409 REG_FLD_MOD(hdmi_av_base(ip_data),
1255 HDMI_CORE_AV_AUD_MODE, enable, 0, 0); 1410 HDMI_CORE_AV_AUD_MODE, true, 0, 0);
1256 REG_FLD_MOD(hdmi_wp_base(ip_data), 1411 REG_FLD_MOD(hdmi_wp_base(ip_data),
1257 HDMI_WP_AUDIO_CTRL, enable, 31, 31); 1412 HDMI_WP_AUDIO_CTRL, true, 30, 30);
1413 return 0;
1414}
1415
1416void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
1417{
1418 REG_FLD_MOD(hdmi_av_base(ip_data),
1419 HDMI_CORE_AV_AUD_MODE, false, 0, 0);
1258 REG_FLD_MOD(hdmi_wp_base(ip_data), 1420 REG_FLD_MOD(hdmi_wp_base(ip_data),
1259 HDMI_WP_AUDIO_CTRL, enable, 30, 30); 1421 HDMI_WP_AUDIO_CTRL, false, 30, 30);
1260} 1422}
1261#endif 1423#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
index a14d1a0e6e41..8366ae19e82e 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
@@ -24,11 +24,6 @@
24#include <linux/string.h> 24#include <linux/string.h>
25#include <video/omapdss.h> 25#include <video/omapdss.h>
26#include "ti_hdmi.h" 26#include "ti_hdmi.h"
27#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
28 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
29#include <sound/soc.h>
30#include <sound/pcm_params.h>
31#endif
32 27
33/* HDMI Wrapper */ 28/* HDMI Wrapper */
34 29
@@ -57,6 +52,13 @@
57#define HDMI_CORE_SYS_SRST 0x14 52#define HDMI_CORE_SYS_SRST 0x14
58#define HDMI_CORE_CTRL1 0x20 53#define HDMI_CORE_CTRL1 0x20
59#define HDMI_CORE_SYS_SYS_STAT 0x24 54#define HDMI_CORE_SYS_SYS_STAT 0x24
55#define HDMI_CORE_SYS_DE_DLY 0xC8
56#define HDMI_CORE_SYS_DE_CTRL 0xCC
57#define HDMI_CORE_SYS_DE_TOP 0xD0
58#define HDMI_CORE_SYS_DE_CNTL 0xD8
59#define HDMI_CORE_SYS_DE_CNTH 0xDC
60#define HDMI_CORE_SYS_DE_LINL 0xE0
61#define HDMI_CORE_SYS_DE_LINH_1 0xE4
60#define HDMI_CORE_SYS_VID_ACEN 0x124 62#define HDMI_CORE_SYS_VID_ACEN 0x124
61#define HDMI_CORE_SYS_VID_MODE 0x128 63#define HDMI_CORE_SYS_VID_MODE 0x128
62#define HDMI_CORE_SYS_INTR_STATE 0x1C0 64#define HDMI_CORE_SYS_INTR_STATE 0x1C0
@@ -66,50 +68,24 @@
66#define HDMI_CORE_SYS_INTR4 0x1D0 68#define HDMI_CORE_SYS_INTR4 0x1D0
67#define HDMI_CORE_SYS_UMASK1 0x1D4 69#define HDMI_CORE_SYS_UMASK1 0x1D4
68#define HDMI_CORE_SYS_TMDS_CTRL 0x208 70#define HDMI_CORE_SYS_TMDS_CTRL 0x208
69#define HDMI_CORE_SYS_DE_DLY 0xC8 71
70#define HDMI_CORE_SYS_DE_CTRL 0xCC
71#define HDMI_CORE_SYS_DE_TOP 0xD0
72#define HDMI_CORE_SYS_DE_CNTL 0xD8
73#define HDMI_CORE_SYS_DE_CNTH 0xDC
74#define HDMI_CORE_SYS_DE_LINL 0xE0
75#define HDMI_CORE_SYS_DE_LINH_1 0xE4
76#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1 72#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
77#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1 73#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
78#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1 74#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
79#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1 75#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
80 76
81/* HDMI DDC E-DID */ 77/* HDMI DDC E-DID */
82#define HDMI_CORE_DDC_CMD 0x3CC
83#define HDMI_CORE_DDC_STATUS 0x3C8
84#define HDMI_CORE_DDC_ADDR 0x3B4 78#define HDMI_CORE_DDC_ADDR 0x3B4
79#define HDMI_CORE_DDC_SEGM 0x3B8
85#define HDMI_CORE_DDC_OFFSET 0x3BC 80#define HDMI_CORE_DDC_OFFSET 0x3BC
86#define HDMI_CORE_DDC_COUNT1 0x3C0 81#define HDMI_CORE_DDC_COUNT1 0x3C0
87#define HDMI_CORE_DDC_COUNT2 0x3C4 82#define HDMI_CORE_DDC_COUNT2 0x3C4
83#define HDMI_CORE_DDC_STATUS 0x3C8
84#define HDMI_CORE_DDC_CMD 0x3CC
88#define HDMI_CORE_DDC_DATA 0x3D0 85#define HDMI_CORE_DDC_DATA 0x3D0
89#define HDMI_CORE_DDC_SEGM 0x3B8
90 86
91/* HDMI IP Core Audio Video */ 87/* HDMI IP Core Audio Video */
92 88
93#define HDMI_CORE_AV_HDMI_CTRL 0xBC
94#define HDMI_CORE_AV_DPD 0xF4
95#define HDMI_CORE_AV_PB_CTRL1 0xF8
96#define HDMI_CORE_AV_PB_CTRL2 0xFC
97#define HDMI_CORE_AV_AVI_TYPE 0x100
98#define HDMI_CORE_AV_AVI_VERS 0x104
99#define HDMI_CORE_AV_AVI_LEN 0x108
100#define HDMI_CORE_AV_AVI_CHSUM 0x10C
101#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
102#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
103#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
104#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
105#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
106#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
107#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
108#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
109#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
110#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
111#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
112#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
113#define HDMI_CORE_AV_ACR_CTRL 0x4 89#define HDMI_CORE_AV_ACR_CTRL 0x4
114#define HDMI_CORE_AV_FREQ_SVAL 0x8 90#define HDMI_CORE_AV_FREQ_SVAL 0x8
115#define HDMI_CORE_AV_N_SVAL1 0xC 91#define HDMI_CORE_AV_N_SVAL1 0xC
@@ -148,25 +124,39 @@
148#define HDMI_CORE_AV_AVI_VERS 0x104 124#define HDMI_CORE_AV_AVI_VERS 0x104
149#define HDMI_CORE_AV_AVI_LEN 0x108 125#define HDMI_CORE_AV_AVI_LEN 0x108
150#define HDMI_CORE_AV_AVI_CHSUM 0x10C 126#define HDMI_CORE_AV_AVI_CHSUM 0x10C
127#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
151#define HDMI_CORE_AV_SPD_TYPE 0x180 128#define HDMI_CORE_AV_SPD_TYPE 0x180
152#define HDMI_CORE_AV_SPD_VERS 0x184 129#define HDMI_CORE_AV_SPD_VERS 0x184
153#define HDMI_CORE_AV_SPD_LEN 0x188 130#define HDMI_CORE_AV_SPD_LEN 0x188
154#define HDMI_CORE_AV_SPD_CHSUM 0x18C 131#define HDMI_CORE_AV_SPD_CHSUM 0x18C
132#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
155#define HDMI_CORE_AV_AUDIO_TYPE 0x200 133#define HDMI_CORE_AV_AUDIO_TYPE 0x200
156#define HDMI_CORE_AV_AUDIO_VERS 0x204 134#define HDMI_CORE_AV_AUDIO_VERS 0x204
157#define HDMI_CORE_AV_AUDIO_LEN 0x208 135#define HDMI_CORE_AV_AUDIO_LEN 0x208
158#define HDMI_CORE_AV_AUDIO_CHSUM 0x20C 136#define HDMI_CORE_AV_AUDIO_CHSUM 0x20C
137#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
159#define HDMI_CORE_AV_MPEG_TYPE 0x280 138#define HDMI_CORE_AV_MPEG_TYPE 0x280
160#define HDMI_CORE_AV_MPEG_VERS 0x284 139#define HDMI_CORE_AV_MPEG_VERS 0x284
161#define HDMI_CORE_AV_MPEG_LEN 0x288 140#define HDMI_CORE_AV_MPEG_LEN 0x288
162#define HDMI_CORE_AV_MPEG_CHSUM 0x28C 141#define HDMI_CORE_AV_MPEG_CHSUM 0x28C
142#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
143#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
163#define HDMI_CORE_AV_CP_BYTE1 0x37C 144#define HDMI_CORE_AV_CP_BYTE1 0x37C
145#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
164#define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC 146#define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC
147
165#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4 148#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
166#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4 149#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
167#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4 150#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
168#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4 151#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
169 152
153#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
154#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
155#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
156#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
157#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
158#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
159
170/* PLL */ 160/* PLL */
171 161
172#define PLLCTRL_PLL_CONTROL 0x0 162#define PLLCTRL_PLL_CONTROL 0x0
@@ -284,35 +274,6 @@ enum hdmi_core_infoframe {
284 HDMI_INFOFRAME_AVI_DB5PR_8 = 7, 274 HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
285 HDMI_INFOFRAME_AVI_DB5PR_9 = 8, 275 HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
286 HDMI_INFOFRAME_AVI_DB5PR_10 = 9, 276 HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
287 HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
288 HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
289 HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
290 HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
291 HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
292 HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
293 HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
294 HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
295 HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
296 HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
297 HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
298 HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
299 HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
300 HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
301 HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
302 HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
303 HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
304 HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
305 HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
306 HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
307 HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
308 HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
309 HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
310 HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
311 HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
312 HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
313 HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
314 HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
315 HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
316}; 277};
317 278
318enum hdmi_packing_mode { 279enum hdmi_packing_mode {
@@ -322,17 +283,6 @@ enum hdmi_packing_mode {
322 HDMI_PACK_ALREADYPACKED = 7 283 HDMI_PACK_ALREADYPACKED = 7
323}; 284};
324 285
325enum hdmi_core_audio_sample_freq {
326 HDMI_AUDIO_FS_32000 = 0x3,
327 HDMI_AUDIO_FS_44100 = 0x0,
328 HDMI_AUDIO_FS_48000 = 0x2,
329 HDMI_AUDIO_FS_88200 = 0x8,
330 HDMI_AUDIO_FS_96000 = 0xA,
331 HDMI_AUDIO_FS_176400 = 0xC,
332 HDMI_AUDIO_FS_192000 = 0xE,
333 HDMI_AUDIO_FS_NOT_INDICATED = 0x1
334};
335
336enum hdmi_core_audio_layout { 286enum hdmi_core_audio_layout {
337 HDMI_AUDIO_LAYOUT_2CH = 0, 287 HDMI_AUDIO_LAYOUT_2CH = 0,
338 HDMI_AUDIO_LAYOUT_8CH = 1 288 HDMI_AUDIO_LAYOUT_8CH = 1
@@ -387,37 +337,12 @@ enum hdmi_audio_blk_strt_end_sig {
387}; 337};
388 338
389enum hdmi_audio_i2s_config { 339enum hdmi_audio_i2s_config {
390 HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
391 HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
392 HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0, 340 HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
393 HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1, 341 HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
394 HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
395 HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
396 HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
397 HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
398 HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
399 HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
400 HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
401 HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
402 HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
403 HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
404 HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
405 HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
406 HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
407 HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0, 342 HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
408 HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1, 343 HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
409 HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0, 344 HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
410 HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1, 345 HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
411 HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
412 HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
413 HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
414 HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
415 HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
416 HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
417 HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
418 HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
419 HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
420 HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
421 HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0, 346 HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
422 HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1, 347 HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
423 HDMI_AUDIO_I2S_SD0_EN = 1, 348 HDMI_AUDIO_I2S_SD0_EN = 1,
@@ -446,20 +371,6 @@ struct hdmi_core_video_config {
446 enum hdmi_core_tclkselclkmult tclk_sel_clkmult; 371 enum hdmi_core_tclkselclkmult tclk_sel_clkmult;
447}; 372};
448 373
449/*
450 * Refer to section 8.2 in HDMI 1.3 specification for
451 * details about infoframe databytes
452 */
453struct hdmi_core_infoframe_audio {
454 u8 db1_coding_type;
455 u8 db1_channel_count;
456 u8 db2_sample_freq;
457 u8 db2_sample_size;
458 u8 db4_channel_alloc;
459 bool db5_downmix_inh;
460 u8 db5_lsv; /* Level shift values for downmix */
461};
462
463struct hdmi_core_packet_enable_repeat { 374struct hdmi_core_packet_enable_repeat {
464 u32 audio_pkt; 375 u32 audio_pkt;
465 u32 audio_pkt_repeat; 376 u32 audio_pkt_repeat;
@@ -496,15 +407,10 @@ struct hdmi_audio_dma {
496}; 407};
497 408
498struct hdmi_core_audio_i2s_config { 409struct hdmi_core_audio_i2s_config {
499 u8 word_max_length;
500 u8 word_length;
501 u8 in_length_bits; 410 u8 in_length_bits;
502 u8 justification; 411 u8 justification;
503 u8 en_high_bitrate_aud;
504 u8 sck_edge_mode; 412 u8 sck_edge_mode;
505 u8 cbit_order;
506 u8 vbit; 413 u8 vbit;
507 u8 ws_polarity;
508 u8 direction; 414 u8 direction;
509 u8 shift; 415 u8 shift;
510 u8 active_sds; 416 u8 active_sds;
@@ -512,7 +418,7 @@ struct hdmi_core_audio_i2s_config {
512 418
513struct hdmi_core_audio_config { 419struct hdmi_core_audio_config {
514 struct hdmi_core_audio_i2s_config i2s_cfg; 420 struct hdmi_core_audio_i2s_config i2s_cfg;
515 enum hdmi_core_audio_sample_freq freq_sample; 421 struct snd_aes_iec958 *iec60958_cfg;
516 bool fs_override; 422 bool fs_override;
517 u32 n; 423 u32 n;
518 u32 cts; 424 u32 cts;
@@ -527,17 +433,4 @@ struct hdmi_core_audio_config {
527 bool en_spdif; 433 bool en_spdif;
528}; 434};
529 435
530#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
531 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
532int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
533 u32 sample_freq, u32 *n, u32 *cts);
534void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
535 struct hdmi_core_infoframe_audio *info_aud);
536void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
537 struct hdmi_core_audio_config *cfg);
538void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
539 struct hdmi_audio_dma *aud_dma);
540void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
541 struct hdmi_audio_format *aud_fmt);
542#endif
543#endif 436#endif
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 9c3daf71750c..2b8973931ff4 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -415,6 +415,7 @@ static const struct venc_config *venc_timings_to_config(
415 return &venc_config_ntsc_trm; 415 return &venc_config_ntsc_trm;
416 416
417 BUG(); 417 BUG();
418 return NULL;
418} 419}
419 420
420static int venc_power_on(struct omap_dss_device *dssdev) 421static int venc_power_on(struct omap_dss_device *dssdev)
@@ -440,10 +441,11 @@ static int venc_power_on(struct omap_dss_device *dssdev)
440 441
441 venc_write_reg(VENC_OUTPUT_CONTROL, l); 442 venc_write_reg(VENC_OUTPUT_CONTROL, l);
442 443
443 dispc_set_digit_size(dssdev->panel.timings.x_res, 444 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
444 dssdev->panel.timings.y_res/2);
445 445
446 regulator_enable(venc.vdda_dac_reg); 446 r = regulator_enable(venc.vdda_dac_reg);
447 if (r)
448 goto err;
447 449
448 if (dssdev->platform_enable) 450 if (dssdev->platform_enable)
449 dssdev->platform_enable(dssdev); 451 dssdev->platform_enable(dssdev);
@@ -485,16 +487,68 @@ unsigned long venc_get_pixel_clock(void)
485 return 13500000; 487 return 13500000;
486} 488}
487 489
490static ssize_t display_output_type_show(struct device *dev,
491 struct device_attribute *attr, char *buf)
492{
493 struct omap_dss_device *dssdev = to_dss_device(dev);
494 const char *ret;
495
496 switch (dssdev->phy.venc.type) {
497 case OMAP_DSS_VENC_TYPE_COMPOSITE:
498 ret = "composite";
499 break;
500 case OMAP_DSS_VENC_TYPE_SVIDEO:
501 ret = "svideo";
502 break;
503 default:
504 return -EINVAL;
505 }
506
507 return snprintf(buf, PAGE_SIZE, "%s\n", ret);
508}
509
510static ssize_t display_output_type_store(struct device *dev,
511 struct device_attribute *attr, const char *buf, size_t size)
512{
513 struct omap_dss_device *dssdev = to_dss_device(dev);
514 enum omap_dss_venc_type new_type;
515
516 if (sysfs_streq("composite", buf))
517 new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
518 else if (sysfs_streq("svideo", buf))
519 new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
520 else
521 return -EINVAL;
522
523 mutex_lock(&venc.venc_lock);
524
525 if (dssdev->phy.venc.type != new_type) {
526 dssdev->phy.venc.type = new_type;
527 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
528 venc_power_off(dssdev);
529 venc_power_on(dssdev);
530 }
531 }
532
533 mutex_unlock(&venc.venc_lock);
534
535 return size;
536}
537
538static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
539 display_output_type_show, display_output_type_store);
540
488/* driver */ 541/* driver */
489static int venc_panel_probe(struct omap_dss_device *dssdev) 542static int venc_panel_probe(struct omap_dss_device *dssdev)
490{ 543{
491 dssdev->panel.timings = omap_dss_pal_timings; 544 dssdev->panel.timings = omap_dss_pal_timings;
492 545
493 return 0; 546 return device_create_file(&dssdev->dev, &dev_attr_output_type);
494} 547}
495 548
496static void venc_panel_remove(struct omap_dss_device *dssdev) 549static void venc_panel_remove(struct omap_dss_device *dssdev)
497{ 550{
551 device_remove_file(&dssdev->dev, &dev_attr_output_type);
498} 552}
499 553
500static int venc_panel_enable(struct omap_dss_device *dssdev) 554static int venc_panel_enable(struct omap_dss_device *dssdev)
@@ -577,12 +631,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
577 return venc_panel_enable(dssdev); 631 return venc_panel_enable(dssdev);
578} 632}
579 633
580static void venc_get_timings(struct omap_dss_device *dssdev,
581 struct omap_video_timings *timings)
582{
583 *timings = dssdev->panel.timings;
584}
585
586static void venc_set_timings(struct omap_dss_device *dssdev, 634static void venc_set_timings(struct omap_dss_device *dssdev,
587 struct omap_video_timings *timings) 635 struct omap_video_timings *timings)
588{ 636{
@@ -597,6 +645,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
597 /* turn the venc off and on to get new timings to use */ 645 /* turn the venc off and on to get new timings to use */
598 venc_panel_disable(dssdev); 646 venc_panel_disable(dssdev);
599 venc_panel_enable(dssdev); 647 venc_panel_enable(dssdev);
648 } else {
649 dss_mgr_set_timings(dssdev->manager, timings);
600 } 650 }
601} 651}
602 652
@@ -661,7 +711,6 @@ static struct omap_dss_driver venc_driver = {
661 .get_resolution = omapdss_default_get_resolution, 711 .get_resolution = omapdss_default_get_resolution,
662 .get_recommended_bpp = omapdss_default_get_recommended_bpp, 712 .get_recommended_bpp = omapdss_default_get_recommended_bpp,
663 713
664 .get_timings = venc_get_timings,
665 .set_timings = venc_set_timings, 714 .set_timings = venc_set_timings,
666 .check_timings = venc_check_timings, 715 .check_timings = venc_check_timings,
667 716
@@ -675,7 +724,7 @@ static struct omap_dss_driver venc_driver = {
675}; 724};
676/* driver end */ 725/* driver end */
677 726
678int venc_init_display(struct omap_dss_device *dssdev) 727static int __init venc_init_display(struct omap_dss_device *dssdev)
679{ 728{
680 DSSDBG("init_display\n"); 729 DSSDBG("init_display\n");
681 730
@@ -695,7 +744,7 @@ int venc_init_display(struct omap_dss_device *dssdev)
695 return 0; 744 return 0;
696} 745}
697 746
698void venc_dump_regs(struct seq_file *s) 747static void venc_dump_regs(struct seq_file *s)
699{ 748{
700#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) 749#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
701 750
@@ -779,8 +828,32 @@ static void venc_put_clocks(void)
779 clk_put(venc.tv_dac_clk); 828 clk_put(venc.tv_dac_clk);
780} 829}
781 830
831static void __init venc_probe_pdata(struct platform_device *pdev)
832{
833 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
834 int r, i;
835
836 for (i = 0; i < pdata->num_devices; ++i) {
837 struct omap_dss_device *dssdev = pdata->devices[i];
838
839 if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
840 continue;
841
842 r = venc_init_display(dssdev);
843 if (r) {
844 DSSERR("device %s init failed: %d\n", dssdev->name, r);
845 continue;
846 }
847
848 r = omap_dss_register_device(dssdev, &pdev->dev, i);
849 if (r)
850 DSSERR("device %s register failed: %d\n",
851 dssdev->name, r);
852 }
853}
854
782/* VENC HW IP initialisation */ 855/* VENC HW IP initialisation */
783static int omap_venchw_probe(struct platform_device *pdev) 856static int __init omap_venchw_probe(struct platform_device *pdev)
784{ 857{
785 u8 rev_id; 858 u8 rev_id;
786 struct resource *venc_mem; 859 struct resource *venc_mem;
@@ -824,6 +897,10 @@ static int omap_venchw_probe(struct platform_device *pdev)
824 if (r) 897 if (r)
825 goto err_reg_panel_driver; 898 goto err_reg_panel_driver;
826 899
900 dss_debugfs_create_file("venc", venc_dump_regs);
901
902 venc_probe_pdata(pdev);
903
827 return 0; 904 return 0;
828 905
829err_reg_panel_driver: 906err_reg_panel_driver:
@@ -833,12 +910,15 @@ err_runtime_get:
833 return r; 910 return r;
834} 911}
835 912
836static int omap_venchw_remove(struct platform_device *pdev) 913static int __exit omap_venchw_remove(struct platform_device *pdev)
837{ 914{
915 omap_dss_unregister_child_devices(&pdev->dev);
916
838 if (venc.vdda_dac_reg != NULL) { 917 if (venc.vdda_dac_reg != NULL) {
839 regulator_put(venc.vdda_dac_reg); 918 regulator_put(venc.vdda_dac_reg);
840 venc.vdda_dac_reg = NULL; 919 venc.vdda_dac_reg = NULL;
841 } 920 }
921
842 omap_dss_unregister_driver(&venc_driver); 922 omap_dss_unregister_driver(&venc_driver);
843 923
844 pm_runtime_disable(&pdev->dev); 924 pm_runtime_disable(&pdev->dev);
@@ -853,7 +933,6 @@ static int venc_runtime_suspend(struct device *dev)
853 clk_disable(venc.tv_dac_clk); 933 clk_disable(venc.tv_dac_clk);
854 934
855 dispc_runtime_put(); 935 dispc_runtime_put();
856 dss_runtime_put();
857 936
858 return 0; 937 return 0;
859} 938}
@@ -862,23 +941,14 @@ static int venc_runtime_resume(struct device *dev)
862{ 941{
863 int r; 942 int r;
864 943
865 r = dss_runtime_get();
866 if (r < 0)
867 goto err_get_dss;
868
869 r = dispc_runtime_get(); 944 r = dispc_runtime_get();
870 if (r < 0) 945 if (r < 0)
871 goto err_get_dispc; 946 return r;
872 947
873 if (venc.tv_dac_clk) 948 if (venc.tv_dac_clk)
874 clk_enable(venc.tv_dac_clk); 949 clk_enable(venc.tv_dac_clk);
875 950
876 return 0; 951 return 0;
877
878err_get_dispc:
879 dss_runtime_put();
880err_get_dss:
881 return r;
882} 952}
883 953
884static const struct dev_pm_ops venc_pm_ops = { 954static const struct dev_pm_ops venc_pm_ops = {
@@ -887,8 +957,7 @@ static const struct dev_pm_ops venc_pm_ops = {
887}; 957};
888 958
889static struct platform_driver omap_venchw_driver = { 959static struct platform_driver omap_venchw_driver = {
890 .probe = omap_venchw_probe, 960 .remove = __exit_p(omap_venchw_remove),
891 .remove = omap_venchw_remove,
892 .driver = { 961 .driver = {
893 .name = "omapdss_venc", 962 .name = "omapdss_venc",
894 .owner = THIS_MODULE, 963 .owner = THIS_MODULE,
@@ -896,18 +965,18 @@ static struct platform_driver omap_venchw_driver = {
896 }, 965 },
897}; 966};
898 967
899int venc_init_platform_driver(void) 968int __init venc_init_platform_driver(void)
900{ 969{
901 if (cpu_is_omap44xx()) 970 if (cpu_is_omap44xx())
902 return 0; 971 return 0;
903 972
904 return platform_driver_register(&omap_venchw_driver); 973 return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
905} 974}
906 975
907void venc_uninit_platform_driver(void) 976void __exit venc_uninit_platform_driver(void)
908{ 977{
909 if (cpu_is_omap44xx()) 978 if (cpu_is_omap44xx())
910 return; 979 return;
911 980
912 return platform_driver_unregister(&omap_venchw_driver); 981 platform_driver_unregister(&omap_venchw_driver);
913} 982}
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 6a09ef87e14f..c6cf372d22c5 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -70,7 +70,7 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
70 70
71 DBG("omapfb_setup_plane\n"); 71 DBG("omapfb_setup_plane\n");
72 72
73 if (ofbi->num_overlays != 1) { 73 if (ofbi->num_overlays == 0) {
74 r = -EINVAL; 74 r = -EINVAL;
75 goto out; 75 goto out;
76 } 76 }
@@ -185,7 +185,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
185{ 185{
186 struct omapfb_info *ofbi = FB2OFB(fbi); 186 struct omapfb_info *ofbi = FB2OFB(fbi);
187 187
188 if (ofbi->num_overlays != 1) { 188 if (ofbi->num_overlays == 0) {
189 memset(pi, 0, sizeof(*pi)); 189 memset(pi, 0, sizeof(*pi));
190 } else { 190 } else {
191 struct omap_overlay *ovl; 191 struct omap_overlay *ovl;
@@ -225,6 +225,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
225 down_write_nested(&rg->lock, rg->id); 225 down_write_nested(&rg->lock, rg->id);
226 atomic_inc(&rg->lock_count); 226 atomic_inc(&rg->lock_count);
227 227
228 if (rg->size == size && rg->type == mi->type)
229 goto out;
230
228 if (atomic_read(&rg->map_count)) { 231 if (atomic_read(&rg->map_count)) {
229 r = -EBUSY; 232 r = -EBUSY;
230 goto out; 233 goto out;
@@ -247,12 +250,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
247 } 250 }
248 } 251 }
249 252
250 if (rg->size != size || rg->type != mi->type) { 253 r = omapfb_realloc_fbmem(fbi, size, mi->type);
251 r = omapfb_realloc_fbmem(fbi, size, mi->type); 254 if (r) {
252 if (r) { 255 dev_err(fbdev->dev, "realloc fbmem failed\n");
253 dev_err(fbdev->dev, "realloc fbmem failed\n"); 256 goto out;
254 goto out;
255 }
256 } 257 }
257 258
258 out: 259 out:
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index b00db4068d21..3450ea0966c9 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -179,6 +179,7 @@ static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
179 break; 179 break;
180 default: 180 default:
181 BUG(); 181 BUG();
182 return 0;
182 } 183 }
183 184
184 offset *= vrfb->bytespp; 185 offset *= vrfb->bytespp;
@@ -1502,7 +1503,7 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
1502 1503
1503 fbnum = simple_strtoul(p, &p, 10); 1504 fbnum = simple_strtoul(p, &p, 10);
1504 1505
1505 if (p == param) 1506 if (p == start)
1506 return -EINVAL; 1507 return -EINVAL;
1507 1508
1508 if (*p != ':') 1509 if (*p != ':')
@@ -2307,7 +2308,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
2307 return 0; 2308 return 0;
2308} 2309}
2309 2310
2310static int omapfb_probe(struct platform_device *pdev) 2311static int __init omapfb_probe(struct platform_device *pdev)
2311{ 2312{
2312 struct omapfb2_device *fbdev = NULL; 2313 struct omapfb2_device *fbdev = NULL;
2313 int r = 0; 2314 int r = 0;
@@ -2448,7 +2449,7 @@ err0:
2448 return r; 2449 return r;
2449} 2450}
2450 2451
2451static int omapfb_remove(struct platform_device *pdev) 2452static int __exit omapfb_remove(struct platform_device *pdev)
2452{ 2453{
2453 struct omapfb2_device *fbdev = platform_get_drvdata(pdev); 2454 struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
2454 2455
@@ -2462,8 +2463,7 @@ static int omapfb_remove(struct platform_device *pdev)
2462} 2463}
2463 2464
2464static struct platform_driver omapfb_driver = { 2465static struct platform_driver omapfb_driver = {
2465 .probe = omapfb_probe, 2466 .remove = __exit_p(omapfb_remove),
2466 .remove = omapfb_remove,
2467 .driver = { 2467 .driver = {
2468 .name = "omapfb", 2468 .name = "omapfb",
2469 .owner = THIS_MODULE, 2469 .owner = THIS_MODULE,
@@ -2474,7 +2474,7 @@ static int __init omapfb_init(void)
2474{ 2474{
2475 DBG("omapfb_init\n"); 2475 DBG("omapfb_init\n");
2476 2476
2477 if (platform_driver_register(&omapfb_driver)) { 2477 if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
2478 printk(KERN_ERR "failed to register omapfb driver\n"); 2478 printk(KERN_ERR "failed to register omapfb driver\n");
2479 return -ENODEV; 2479 return -ENODEV;
2480 } 2480 }
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index c0bdc9b54ecf..30361a09aecd 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -166,6 +166,7 @@ static inline struct omapfb_display_data *get_display_data(
166 166
167 /* This should never happen */ 167 /* This should never happen */
168 BUG(); 168 BUG();
169 return NULL;
169} 170}
170 171
171static inline void omapfb_lock(struct omapfb2_device *fbdev) 172static inline void omapfb_lock(struct omapfb2_device *fbdev)
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index 4e5b960c32c8..7e990220ad2a 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -179,8 +179,10 @@ void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
179 pixel_size_exp = 2; 179 pixel_size_exp = 2;
180 else if (bytespp == 2) 180 else if (bytespp == 2)
181 pixel_size_exp = 1; 181 pixel_size_exp = 1;
182 else 182 else {
183 BUG(); 183 BUG();
184 return;
185 }
184 186
185 vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp; 187 vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
186 vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT); 188 vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 1d71c08a818f..0b4ae0cebeda 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -316,12 +316,9 @@ pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
316 ret = wait_event_interruptible_timeout(priv->wait_idle, 316 ret = wait_event_interruptible_timeout(priv->wait_idle,
317 !priv->shared->hw_running, HZ*4); 317 !priv->shared->hw_running, HZ*4);
318 318
319 if (ret < 0) 319 if (ret != 0)
320 break; 320 break;
321 321
322 if (ret > 0)
323 continue;
324
325 if (gc_readl(priv, REG_GCRBEXHR) == rbexhr && 322 if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
326 priv->shared->num_interrupts == num) { 323 priv->shared->num_interrupts == num) {
327 QERROR("TIMEOUT"); 324 QERROR("TIMEOUT");
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index f3105160bf98..ea7b661e7229 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -47,7 +47,7 @@
47#ifdef CONFIG_FB_S3C_DEBUG_REGWRITE 47#ifdef CONFIG_FB_S3C_DEBUG_REGWRITE
48#undef writel 48#undef writel
49#define writel(v, r) do { \ 49#define writel(v, r) do { \
50 printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \ 50 pr_debug("%s: %08x => %p\n", __func__, (unsigned int)v, r); \
51 __raw_writel(v, r); \ 51 __raw_writel(v, r); \
52} while (0) 52} while (0)
53#endif /* FB_S3C_DEBUG_REGWRITE */ 53#endif /* FB_S3C_DEBUG_REGWRITE */
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
361 result = (unsigned int)tmp / 1000; 361 result = (unsigned int)tmp / 1000;
362 362
363 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", 363 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
364 pixclk, clk, result, clk / result); 364 pixclk, clk, result, result ? clk / result : clk);
365 365
366 return result; 366 return result;
367} 367}
@@ -495,7 +495,6 @@ static int s3c_fb_set_par(struct fb_info *info)
495 u32 alpha = 0; 495 u32 alpha = 0;
496 u32 data; 496 u32 data;
497 u32 pagewidth; 497 u32 pagewidth;
498 int clkdiv;
499 498
500 dev_dbg(sfb->dev, "setting framebuffer parameters\n"); 499 dev_dbg(sfb->dev, "setting framebuffer parameters\n");
501 500
@@ -532,48 +531,9 @@ static int s3c_fb_set_par(struct fb_info *info)
532 /* disable the window whilst we update it */ 531 /* disable the window whilst we update it */
533 writel(0, regs + WINCON(win_no)); 532 writel(0, regs + WINCON(win_no));
534 533
535 /* use platform specified window as the basis for the lcd timings */ 534 if (!sfb->output_on)
536
537 if (win_no == sfb->pdata->default_win) {
538 clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
539
540 data = sfb->pdata->vidcon0;
541 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
542
543 if (clkdiv > 1)
544 data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
545 else
546 data &= ~VIDCON0_CLKDIR; /* 1:1 clock */
547
548 /* write the timing data to the panel */
549
550 if (sfb->variant.is_2443)
551 data |= (1 << 5);
552
553 writel(data, regs + VIDCON0);
554
555 s3c_fb_enable(sfb, 1); 535 s3c_fb_enable(sfb, 1);
556 536
557 data = VIDTCON0_VBPD(var->upper_margin - 1) |
558 VIDTCON0_VFPD(var->lower_margin - 1) |
559 VIDTCON0_VSPW(var->vsync_len - 1);
560
561 writel(data, regs + sfb->variant.vidtcon);
562
563 data = VIDTCON1_HBPD(var->left_margin - 1) |
564 VIDTCON1_HFPD(var->right_margin - 1) |
565 VIDTCON1_HSPW(var->hsync_len - 1);
566
567 /* VIDTCON1 */
568 writel(data, regs + sfb->variant.vidtcon + 4);
569
570 data = VIDTCON2_LINEVAL(var->yres - 1) |
571 VIDTCON2_HOZVAL(var->xres - 1) |
572 VIDTCON2_LINEVAL_E(var->yres - 1) |
573 VIDTCON2_HOZVAL_E(var->xres - 1);
574 writel(data, regs + sfb->variant.vidtcon + 8);
575 }
576
577 /* write the buffer address */ 537 /* write the buffer address */
578 538
579 /* start and end registers stride is 8 */ 539 /* start and end registers stride is 8 */
@@ -839,6 +799,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
839 struct s3c_fb *sfb = win->parent; 799 struct s3c_fb *sfb = win->parent;
840 unsigned int index = win->index; 800 unsigned int index = win->index;
841 u32 wincon; 801 u32 wincon;
802 u32 output_on = sfb->output_on;
842 803
843 dev_dbg(sfb->dev, "blank mode %d\n", blank_mode); 804 dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
844 805
@@ -877,34 +838,18 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
877 838
878 shadow_protect_win(win, 1); 839 shadow_protect_win(win, 1);
879 writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4)); 840 writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
880 shadow_protect_win(win, 0);
881 841
882 /* Check the enabled state to see if we need to be running the 842 /* Check the enabled state to see if we need to be running the
883 * main LCD interface, as if there are no active windows then 843 * main LCD interface, as if there are no active windows then
884 * it is highly likely that we also do not need to output 844 * it is highly likely that we also do not need to output
885 * anything. 845 * anything.
886 */ 846 */
887 847 s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
888 /* We could do something like the following code, but the current 848 shadow_protect_win(win, 0);
889 * system of using framebuffer events means that we cannot make
890 * the distinction between just window 0 being inactive and all
891 * the windows being down.
892 *
893 * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
894 */
895
896 /* we're stuck with this until we can do something about overriding
897 * the power control using the blanking event for a single fb.
898 */
899 if (index == sfb->pdata->default_win) {
900 shadow_protect_win(win, 1);
901 s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
902 shadow_protect_win(win, 0);
903 }
904 849
905 pm_runtime_put_sync(sfb->dev); 850 pm_runtime_put_sync(sfb->dev);
906 851
907 return 0; 852 return output_on == sfb->output_on;
908} 853}
909 854
910/** 855/**
@@ -1111,7 +1056,7 @@ static struct fb_ops s3c_fb_ops = {
1111 * 1056 *
1112 * Calculate the pixel clock when none has been given through platform data. 1057 * Calculate the pixel clock when none has been given through platform data.
1113 */ 1058 */
1114static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode) 1059static void s3c_fb_missing_pixclock(struct fb_videomode *mode)
1115{ 1060{
1116 u64 pixclk = 1000000000000ULL; 1061 u64 pixclk = 1000000000000ULL;
1117 u32 div; 1062 u32 div;
@@ -1144,11 +1089,11 @@ static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb,
1144 1089
1145 dev_dbg(sfb->dev, "allocating memory for display\n"); 1090 dev_dbg(sfb->dev, "allocating memory for display\n");
1146 1091
1147 real_size = windata->win_mode.xres * windata->win_mode.yres; 1092 real_size = windata->xres * windata->yres;
1148 virt_size = windata->virtual_x * windata->virtual_y; 1093 virt_size = windata->virtual_x * windata->virtual_y;
1149 1094
1150 dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n", 1095 dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n",
1151 real_size, windata->win_mode.xres, windata->win_mode.yres, 1096 real_size, windata->xres, windata->yres,
1152 virt_size, windata->virtual_x, windata->virtual_y); 1097 virt_size, windata->virtual_x, windata->virtual_y);
1153 1098
1154 size = (real_size > virt_size) ? real_size : virt_size; 1099 size = (real_size > virt_size) ? real_size : virt_size;
@@ -1230,7 +1175,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
1230 struct s3c_fb_win **res) 1175 struct s3c_fb_win **res)
1231{ 1176{
1232 struct fb_var_screeninfo *var; 1177 struct fb_var_screeninfo *var;
1233 struct fb_videomode *initmode; 1178 struct fb_videomode initmode;
1234 struct s3c_fb_pd_win *windata; 1179 struct s3c_fb_pd_win *windata;
1235 struct s3c_fb_win *win; 1180 struct s3c_fb_win *win;
1236 struct fb_info *fbinfo; 1181 struct fb_info *fbinfo;
@@ -1251,11 +1196,11 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
1251 } 1196 }
1252 1197
1253 windata = sfb->pdata->win[win_no]; 1198 windata = sfb->pdata->win[win_no];
1254 initmode = &windata->win_mode; 1199 initmode = *sfb->pdata->vtiming;
1255 1200
1256 WARN_ON(windata->max_bpp == 0); 1201 WARN_ON(windata->max_bpp == 0);
1257 WARN_ON(windata->win_mode.xres == 0); 1202 WARN_ON(windata->xres == 0);
1258 WARN_ON(windata->win_mode.yres == 0); 1203 WARN_ON(windata->yres == 0);
1259 1204
1260 win = fbinfo->par; 1205 win = fbinfo->par;
1261 *res = win; 1206 *res = win;
@@ -1294,7 +1239,9 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
1294 } 1239 }
1295 1240
1296 /* setup the initial video mode from the window */ 1241 /* setup the initial video mode from the window */
1297 fb_videomode_to_var(&fbinfo->var, initmode); 1242 initmode.xres = windata->xres;
1243 initmode.yres = windata->yres;
1244 fb_videomode_to_var(&fbinfo->var, &initmode);
1298 1245
1299 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; 1246 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
1300 fbinfo->fix.accel = FB_ACCEL_NONE; 1247 fbinfo->fix.accel = FB_ACCEL_NONE;
@@ -1339,6 +1286,53 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
1339} 1286}
1340 1287
1341/** 1288/**
1289 * s3c_fb_set_rgb_timing() - set video timing for rgb interface.
1290 * @sfb: The base resources for the hardware.
1291 *
1292 * Set horizontal and vertical lcd rgb interface timing.
1293 */
1294static void s3c_fb_set_rgb_timing(struct s3c_fb *sfb)
1295{
1296 struct fb_videomode *vmode = sfb->pdata->vtiming;
1297 void __iomem *regs = sfb->regs;
1298 int clkdiv;
1299 u32 data;
1300
1301 if (!vmode->pixclock)
1302 s3c_fb_missing_pixclock(vmode);
1303
1304 clkdiv = s3c_fb_calc_pixclk(sfb, vmode->pixclock);
1305
1306 data = sfb->pdata->vidcon0;
1307 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
1308
1309 if (clkdiv > 1)
1310 data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
1311 else
1312 data &= ~VIDCON0_CLKDIR; /* 1:1 clock */
1313
1314 if (sfb->variant.is_2443)
1315 data |= (1 << 5);
1316 writel(data, regs + VIDCON0);
1317
1318 data = VIDTCON0_VBPD(vmode->upper_margin - 1) |
1319 VIDTCON0_VFPD(vmode->lower_margin - 1) |
1320 VIDTCON0_VSPW(vmode->vsync_len - 1);
1321 writel(data, regs + sfb->variant.vidtcon);
1322
1323 data = VIDTCON1_HBPD(vmode->left_margin - 1) |
1324 VIDTCON1_HFPD(vmode->right_margin - 1) |
1325 VIDTCON1_HSPW(vmode->hsync_len - 1);
1326 writel(data, regs + sfb->variant.vidtcon + 4);
1327
1328 data = VIDTCON2_LINEVAL(vmode->yres - 1) |
1329 VIDTCON2_HOZVAL(vmode->xres - 1) |
1330 VIDTCON2_LINEVAL_E(vmode->yres - 1) |
1331 VIDTCON2_HOZVAL_E(vmode->xres - 1);
1332 writel(data, regs + sfb->variant.vidtcon + 8);
1333}
1334
1335/**
1342 * s3c_fb_clear_win() - clear hardware window registers. 1336 * s3c_fb_clear_win() - clear hardware window registers.
1343 * @sfb: The base resources for the hardware. 1337 * @sfb: The base resources for the hardware.
1344 * @win: The window to process. 1338 * @win: The window to process.
@@ -1354,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
1354 writel(0, regs + VIDOSD_A(win, sfb->variant)); 1348 writel(0, regs + VIDOSD_A(win, sfb->variant));
1355 writel(0, regs + VIDOSD_B(win, sfb->variant)); 1349 writel(0, regs + VIDOSD_B(win, sfb->variant));
1356 writel(0, regs + VIDOSD_C(win, sfb->variant)); 1350 writel(0, regs + VIDOSD_C(win, sfb->variant));
1357 reg = readl(regs + SHADOWCON); 1351
1358 writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON); 1352 if (sfb->variant.has_shadowcon) {
1353 reg = readl(sfb->regs + SHADOWCON);
1354 reg &= ~(SHADOWCON_WINx_PROTECT(win) |
1355 SHADOWCON_CHx_ENABLE(win) |
1356 SHADOWCON_CHx_LOCAL_ENABLE(win));
1357 writel(reg, sfb->regs + SHADOWCON);
1358 }
1359} 1359}
1360 1360
1361static int __devinit s3c_fb_probe(struct platform_device *pdev) 1361static int __devinit s3c_fb_probe(struct platform_device *pdev)
@@ -1481,15 +1481,14 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
1481 writel(0xffffff, regs + WKEYCON1); 1481 writel(0xffffff, regs + WKEYCON1);
1482 } 1482 }
1483 1483
1484 s3c_fb_set_rgb_timing(sfb);
1485
1484 /* we have the register setup, start allocating framebuffers */ 1486 /* we have the register setup, start allocating framebuffers */
1485 1487
1486 for (win = 0; win < fbdrv->variant.nr_windows; win++) { 1488 for (win = 0; win < fbdrv->variant.nr_windows; win++) {
1487 if (!pd->win[win]) 1489 if (!pd->win[win])
1488 continue; 1490 continue;
1489 1491
1490 if (!pd->win[win]->win_mode.pixclock)
1491 s3c_fb_missing_pixclock(&pd->win[win]->win_mode);
1492
1493 ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win], 1492 ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
1494 &sfb->windows[win]); 1493 &sfb->windows[win]);
1495 if (ret < 0) { 1494 if (ret < 0) {
@@ -1564,6 +1563,8 @@ static int s3c_fb_suspend(struct device *dev)
1564 struct s3c_fb_win *win; 1563 struct s3c_fb_win *win;
1565 int win_no; 1564 int win_no;
1566 1565
1566 pm_runtime_get_sync(sfb->dev);
1567
1567 for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) { 1568 for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
1568 win = sfb->windows[win_no]; 1569 win = sfb->windows[win_no];
1569 if (!win) 1570 if (!win)
@@ -1577,6 +1578,9 @@ static int s3c_fb_suspend(struct device *dev)
1577 clk_disable(sfb->lcd_clk); 1578 clk_disable(sfb->lcd_clk);
1578 1579
1579 clk_disable(sfb->bus_clk); 1580 clk_disable(sfb->bus_clk);
1581
1582 pm_runtime_put_sync(sfb->dev);
1583
1580 return 0; 1584 return 0;
1581} 1585}
1582 1586
@@ -1589,6 +1593,8 @@ static int s3c_fb_resume(struct device *dev)
1589 int win_no; 1593 int win_no;
1590 u32 reg; 1594 u32 reg;
1591 1595
1596 pm_runtime_get_sync(sfb->dev);
1597
1592 clk_enable(sfb->bus_clk); 1598 clk_enable(sfb->bus_clk);
1593 1599
1594 if (!sfb->variant.has_clksel) 1600 if (!sfb->variant.has_clksel)
@@ -1623,6 +1629,8 @@ static int s3c_fb_resume(struct device *dev)
1623 shadow_protect_win(win, 0); 1629 shadow_protect_win(win, 0);
1624 } 1630 }
1625 1631
1632 s3c_fb_set_rgb_timing(sfb);
1633
1626 /* restore framebuffers */ 1634 /* restore framebuffers */
1627 for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) { 1635 for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
1628 win = sfb->windows[win_no]; 1636 win = sfb->windows[win_no];
@@ -1633,6 +1641,8 @@ static int s3c_fb_resume(struct device *dev)
1633 s3c_fb_set_par(win->fbinfo); 1641 s3c_fb_set_par(win->fbinfo);
1634 } 1642 }
1635 1643
1644 pm_runtime_put_sync(sfb->dev);
1645
1636 return 0; 1646 return 0;
1637} 1647}
1638#endif 1648#endif
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index cee7803a0a1c..f3d3b9ce4751 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r
1351 /* following part not present in X11 driver */ 1351 /* following part not present in X11 driver */
1352 cr67 = vga_in8(0x3d5, par) & 0xf; 1352 cr67 = vga_in8(0x3d5, par) & 0xf;
1353 vga_out8(0x3d5, 0x50 | cr67, par); 1353 vga_out8(0x3d5, 0x50 | cr67, par);
1354 udelay(10000); 1354 mdelay(10);
1355 vga_out8(0x3d4, 0x67, par); 1355 vga_out8(0x3d4, 0x67, par);
1356 /* end of part */ 1356 /* end of part */
1357 vga_out8(0x3d5, reg->CR67 & ~0x0c, par); 1357 vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par)
1904 vga_out8(0x3d4, 0x66, par); 1904 vga_out8(0x3d4, 0x66, par);
1905 cr66 = vga_in8(0x3d5, par); 1905 cr66 = vga_in8(0x3d5, par);
1906 vga_out8(0x3d5, cr66 | 0x02, par); 1906 vga_out8(0x3d5, cr66 | 0x02, par);
1907 udelay(10000); 1907 mdelay(10);
1908 1908
1909 vga_out8(0x3d4, 0x66, par); 1909 vga_out8(0x3d4, 0x66, par);
1910 vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ 1910 vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */
1911 udelay(10000); 1911 mdelay(10);
1912 1912
1913 1913
1914 /* 1914 /*
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par)
1918 vga_out8(0x3d4, 0x3f, par); 1918 vga_out8(0x3d4, 0x3f, par);
1919 cr3f = vga_in8(0x3d5, par); 1919 cr3f = vga_in8(0x3d5, par);
1920 vga_out8(0x3d5, cr3f | 0x08, par); 1920 vga_out8(0x3d5, cr3f | 0x08, par);
1921 udelay(10000); 1921 mdelay(10);
1922 1922
1923 vga_out8(0x3d4, 0x3f, par); 1923 vga_out8(0x3d4, 0x3f, par);
1924 vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ 1924 vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */
1925 udelay(10000); 1925 mdelay(10);
1926 1926
1927 /* Savage ramdac speeds */ 1927 /* Savage ramdac speeds */
1928 par->numClocks = 4; 1928 par->numClocks = 4;
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index eafb19da2c07..930e550e752a 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -31,6 +31,7 @@
31 31
32#include "sh_mobile_lcdcfb.h" 32#include "sh_mobile_lcdcfb.h"
33 33
34/* HDMI Core Control Register (HTOP0) */
34#define HDMI_SYSTEM_CTRL 0x00 /* System control */ 35#define HDMI_SYSTEM_CTRL 0x00 /* System control */
35#define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control, 36#define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control,
36 bits 19..16 of 20-bit N for Audio Clock Regeneration packet */ 37 bits 19..16 of 20-bit N for Audio Clock Regeneration packet */
@@ -201,6 +202,68 @@
201#define HDMI_REVISION_ID 0xF1 /* Revision ID */ 202#define HDMI_REVISION_ID 0xF1 /* Revision ID */
202#define HDMI_TEST_MODE 0xFE /* Test mode */ 203#define HDMI_TEST_MODE 0xFE /* Test mode */
203 204
205/* HDMI Control Register (HTOP1) */
206#define HDMI_HTOP1_TEST_MODE 0x0000 /* Test mode */
207#define HDMI_HTOP1_VIDEO_INPUT 0x0008 /* VideoInput */
208#define HDMI_HTOP1_CORE_RSTN 0x000C /* CoreResetn */
209#define HDMI_HTOP1_PLLBW 0x0018 /* PLLBW */
210#define HDMI_HTOP1_CLK_TO_PHY 0x001C /* Clk to Phy */
211#define HDMI_HTOP1_VIDEO_INPUT2 0x0020 /* VideoInput2 */
212#define HDMI_HTOP1_TISEMP0_1 0x0024 /* tisemp0-1 */
213#define HDMI_HTOP1_TISEMP2_C 0x0028 /* tisemp2-c */
214#define HDMI_HTOP1_TISIDRV 0x002C /* tisidrv */
215#define HDMI_HTOP1_TISEN 0x0034 /* tisen */
216#define HDMI_HTOP1_TISDREN 0x0038 /* tisdren */
217#define HDMI_HTOP1_CISRANGE 0x003C /* cisrange */
218#define HDMI_HTOP1_ENABLE_SELECTOR 0x0040 /* Enable Selector */
219#define HDMI_HTOP1_MACRO_RESET 0x0044 /* Macro reset */
220#define HDMI_HTOP1_PLL_CALIBRATION 0x0048 /* PLL calibration */
221#define HDMI_HTOP1_RE_CALIBRATION 0x004C /* Re-calibration */
222#define HDMI_HTOP1_CURRENT 0x0050 /* Current */
223#define HDMI_HTOP1_PLL_LOCK_DETECT 0x0054 /* PLL lock detect */
224#define HDMI_HTOP1_PHY_TEST_MODE 0x0058 /* PHY Test Mode */
225#define HDMI_HTOP1_CLK_SET 0x0080 /* Clock Set */
226#define HDMI_HTOP1_DDC_FAIL_SAFE 0x0084 /* DDC fail safe */
227#define HDMI_HTOP1_PRBS 0x0088 /* PRBS */
228#define HDMI_HTOP1_EDID_AINC_CONTROL 0x008C /* EDID ainc Control */
229#define HDMI_HTOP1_HTOP_DCL_MODE 0x00FC /* Deep Coloer Mode */
230#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0 0x0100 /* Deep Color:FRC COEF0 */
231#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1 0x0104 /* Deep Color:FRC COEF1 */
232#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2 0x0108 /* Deep Color:FRC COEF2 */
233#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3 0x010C /* Deep Color:FRC COEF3 */
234#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C 0x0110 /* Deep Color:FRC COEF0C */
235#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C 0x0114 /* Deep Color:FRC COEF1C */
236#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C 0x0118 /* Deep Color:FRC COEF2C */
237#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C 0x011C /* Deep Color:FRC COEF3C */
238#define HDMI_HTOP1_HTOP_DCL_FRC_MODE 0x0120 /* Deep Color:FRC Mode */
239#define HDMI_HTOP1_HTOP_DCL_RECT_START1 0x0124 /* Deep Color:Rect Start1 */
240#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1 0x0128 /* Deep Color:Rect Size1 */
241#define HDMI_HTOP1_HTOP_DCL_RECT_START2 0x012C /* Deep Color:Rect Start2 */
242#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2 0x0130 /* Deep Color:Rect Size2 */
243#define HDMI_HTOP1_HTOP_DCL_RECT_START3 0x0134 /* Deep Color:Rect Start3 */
244#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3 0x0138 /* Deep Color:Rect Size3 */
245#define HDMI_HTOP1_HTOP_DCL_RECT_START4 0x013C /* Deep Color:Rect Start4 */
246#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4 0x0140 /* Deep Color:Rect Size4 */
247#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1 0x0144 /* Deep Color:Fil Para Y1_1 */
248#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2 0x0148 /* Deep Color:Fil Para Y1_2 */
249#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1 0x014C /* Deep Color:Fil Para CB1_1 */
250#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2 0x0150 /* Deep Color:Fil Para CB1_2 */
251#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1 0x0154 /* Deep Color:Fil Para CR1_1 */
252#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2 0x0158 /* Deep Color:Fil Para CR1_2 */
253#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1 0x015C /* Deep Color:Fil Para Y2_1 */
254#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2 0x0160 /* Deep Color:Fil Para Y2_2 */
255#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1 0x0164 /* Deep Color:Fil Para CB2_1 */
256#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2 0x0168 /* Deep Color:Fil Para CB2_2 */
257#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1 0x016C /* Deep Color:Fil Para CR2_1 */
258#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2 0x0170 /* Deep Color:Fil Para CR2_2 */
259#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1 0x0174 /* Deep Color:Cor Para Y1 */
260#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1 0x0178 /* Deep Color:Cor Para CB1 */
261#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1 0x017C /* Deep Color:Cor Para CR1 */
262#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2 0x0180 /* Deep Color:Cor Para Y2 */
263#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2 0x0184 /* Deep Color:Cor Para CB2 */
264#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2 0x0188 /* Deep Color:Cor Para CR2 */
265#define HDMI_HTOP1_EDID_DATA_READ 0x0200 /* EDID Data Read 128Byte:0x03FC */
266
204enum hotplug_state { 267enum hotplug_state {
205 HDMI_HOTPLUG_DISCONNECTED, 268 HDMI_HOTPLUG_DISCONNECTED,
206 HDMI_HOTPLUG_CONNECTED, 269 HDMI_HOTPLUG_CONNECTED,
@@ -211,6 +274,7 @@ struct sh_hdmi {
211 struct sh_mobile_lcdc_entity entity; 274 struct sh_mobile_lcdc_entity entity;
212 275
213 void __iomem *base; 276 void __iomem *base;
277 void __iomem *htop1;
214 enum hotplug_state hp_state; /* hot-plug status */ 278 enum hotplug_state hp_state; /* hot-plug status */
215 u8 preprogrammed_vic; /* use a pre-programmed VIC or 279 u8 preprogrammed_vic; /* use a pre-programmed VIC or
216 the external mode */ 280 the external mode */
@@ -222,20 +286,66 @@ struct sh_hdmi {
222 struct delayed_work edid_work; 286 struct delayed_work edid_work;
223 struct fb_videomode mode; 287 struct fb_videomode mode;
224 struct fb_monspecs monspec; 288 struct fb_monspecs monspec;
289
290 /* register access functions */
291 void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg);
292 u8 (*read)(struct sh_hdmi *hdmi, u8 reg);
225}; 293};
226 294
227#define entity_to_sh_hdmi(e) container_of(e, struct sh_hdmi, entity) 295#define entity_to_sh_hdmi(e) container_of(e, struct sh_hdmi, entity)
228 296
229static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg) 297static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg)
230{ 298{
231 iowrite8(data, hdmi->base + reg); 299 iowrite8(data, hdmi->base + reg);
232} 300}
233 301
234static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg) 302static u8 __hdmi_read8(struct sh_hdmi *hdmi, u8 reg)
235{ 303{
236 return ioread8(hdmi->base + reg); 304 return ioread8(hdmi->base + reg);
237} 305}
238 306
307static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg)
308{
309 iowrite32((u32)data, hdmi->base + (reg * 4));
310 udelay(100);
311}
312
313static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg)
314{
315 return (u8)ioread32(hdmi->base + (reg * 4));
316}
317
318static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
319{
320 hdmi->write(hdmi, data, reg);
321}
322
323static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
324{
325 return hdmi->read(hdmi, reg);
326}
327
328static void hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg)
329{
330 u8 val = hdmi_read(hdmi, reg);
331
332 val &= ~mask;
333 val |= (data & mask);
334
335 hdmi_write(hdmi, val, reg);
336}
337
338static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg)
339{
340 iowrite32(data, hdmi->htop1 + reg);
341 udelay(100);
342}
343
344static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg)
345{
346 return ioread32(hdmi->htop1 + reg);
347}
348
239/* 349/*
240 * HDMI sound 350 * HDMI sound
241 */ 351 */
@@ -693,11 +803,11 @@ static void sh_hdmi_configure(struct sh_hdmi *hdmi)
693 msleep(10); 803 msleep(10);
694 804
695 /* PS mode b->d, reset PLLA and PLLB */ 805 /* PS mode b->d, reset PLLA and PLLB */
696 hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL); 806 hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL);
697 807
698 udelay(10); 808 udelay(10);
699 809
700 hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL); 810 hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL);
701} 811}
702 812
703static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi, 813static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
@@ -746,7 +856,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
746 /* Read EDID */ 856 /* Read EDID */
747 dev_dbg(hdmi->dev, "Read back EDID code:"); 857 dev_dbg(hdmi->dev, "Read back EDID code:");
748 for (i = 0; i < 128; i++) { 858 for (i = 0; i < 128; i++) {
749 edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW); 859 edid[i] = (hdmi->htop1) ?
860 (u8)hdmi_htop1_read(hdmi, HDMI_HTOP1_EDID_DATA_READ + (i * 4)) :
861 hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
750#ifdef DEBUG 862#ifdef DEBUG
751 if ((i % 16) == 0) { 863 if ((i % 16) == 0) {
752 printk(KERN_CONT "\n"); 864 printk(KERN_CONT "\n");
@@ -917,13 +1029,13 @@ static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id)
917 u8 status1, status2, mask1, mask2; 1029 u8 status1, status2, mask1, mask2;
918 1030
919 /* mode_b and PLLA and PLLB reset */ 1031 /* mode_b and PLLA and PLLB reset */
920 hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL); 1032 hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL);
921 1033
922 /* How long shall reset be held? */ 1034 /* How long shall reset be held? */
923 udelay(10); 1035 udelay(10);
924 1036
925 /* mode_b and PLLA and PLLB reset release */ 1037 /* mode_b and PLLA and PLLB reset release */
926 hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL); 1038 hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL);
927 1039
928 status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1); 1040 status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
929 status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2); 1041 status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);
@@ -1001,7 +1113,7 @@ static int sh_hdmi_display_on(struct sh_mobile_lcdc_entity *entity)
1001 */ 1113 */
1002 if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) { 1114 if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
1003 /* PS mode d->e. All functions are active */ 1115 /* PS mode d->e. All functions are active */
1004 hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL); 1116 hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL);
1005 dev_dbg(hdmi->dev, "HDMI running\n"); 1117 dev_dbg(hdmi->dev, "HDMI running\n");
1006 } 1118 }
1007 1119
@@ -1016,7 +1128,7 @@ static void sh_hdmi_display_off(struct sh_mobile_lcdc_entity *entity)
1016 1128
1017 dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi); 1129 dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi);
1018 /* PS mode e->a */ 1130 /* PS mode e->a */
1019 hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL); 1131 hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL);
1020} 1132}
1021 1133
1022static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = { 1134static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = {
@@ -1110,10 +1222,58 @@ out:
1110 dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi); 1222 dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi);
1111} 1223}
1112 1224
1225static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
1226{
1227 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE);
1228 hdmi_htop1_write(hdmi, 0x0000000b, 0x0010);
1229 hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE);
1230 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1);
1231 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2);
1232 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1);
1233 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2);
1234 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1);
1235 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2);
1236 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1);
1237 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2);
1238 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1);
1239 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2);
1240 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1);
1241 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2);
1242 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1);
1243 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1);
1244 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1);
1245 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2);
1246 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2);
1247 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2);
1248 hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT);
1249 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1);
1250 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C);
1251 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE);
1252 hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV);
1253 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW);
1254 hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
1255 hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
1256 hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
1257 hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
1258 hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE);
1259 msleep(100);
1260 hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR);
1261 msleep(100);
1262 hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
1263 hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
1264 hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
1265 hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
1266 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT);
1267 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_CLK_TO_PHY);
1268 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2);
1269 hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET);
1270}
1271
1113static int __init sh_hdmi_probe(struct platform_device *pdev) 1272static int __init sh_hdmi_probe(struct platform_device *pdev)
1114{ 1273{
1115 struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data; 1274 struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
1116 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1275 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1276 struct resource *htop1_res;
1117 int irq = platform_get_irq(pdev, 0), ret; 1277 int irq = platform_get_irq(pdev, 0), ret;
1118 struct sh_hdmi *hdmi; 1278 struct sh_hdmi *hdmi;
1119 long rate; 1279 long rate;
@@ -1121,6 +1281,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1121 if (!res || !pdata || irq < 0) 1281 if (!res || !pdata || irq < 0)
1122 return -ENODEV; 1282 return -ENODEV;
1123 1283
1284 htop1_res = NULL;
1285 if (pdata->flags & HDMI_HAS_HTOP1) {
1286 htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1287 if (!htop1_res) {
1288 dev_err(&pdev->dev, "htop1 needs register base\n");
1289 return -EINVAL;
1290 }
1291 }
1292
1124 hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); 1293 hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
1125 if (!hdmi) { 1294 if (!hdmi) {
1126 dev_err(&pdev->dev, "Cannot allocate device data\n"); 1295 dev_err(&pdev->dev, "Cannot allocate device data\n");
@@ -1138,6 +1307,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1138 goto egetclk; 1307 goto egetclk;
1139 } 1308 }
1140 1309
1310 /* select register access functions */
1311 if (pdata->flags & HDMI_32BIT_REG) {
1312 hdmi->write = __hdmi_write32;
1313 hdmi->read = __hdmi_read32;
1314 } else {
1315 hdmi->write = __hdmi_write8;
1316 hdmi->read = __hdmi_read8;
1317 }
1318
1141 /* An arbitrary relaxed pixclock just to get things started: from standard 480p */ 1319 /* An arbitrary relaxed pixclock just to get things started: from standard 480p */
1142 rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037)); 1320 rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037));
1143 if (rate > 0) 1321 if (rate > 0)
@@ -1176,6 +1354,24 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1176 pm_runtime_enable(&pdev->dev); 1354 pm_runtime_enable(&pdev->dev);
1177 pm_runtime_get_sync(&pdev->dev); 1355 pm_runtime_get_sync(&pdev->dev);
1178 1356
1357 /* init interrupt polarity */
1358 if (pdata->flags & HDMI_OUTPUT_PUSH_PULL)
1359 hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL);
1360
1361 if (pdata->flags & HDMI_OUTPUT_POLARITY_HI)
1362 hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL);
1363
1364 /* enable htop1 register if needed */
1365 if (htop1_res) {
1366 hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res));
1367 if (!hdmi->htop1) {
1368 dev_err(&pdev->dev, "control register region already claimed\n");
1369 ret = -ENOMEM;
1370 goto emap_htop1;
1371 }
1372 sh_hdmi_htop1_init(hdmi);
1373 }
1374
1179 /* Product and revision IDs are 0 in sh-mobile version */ 1375 /* Product and revision IDs are 0 in sh-mobile version */
1180 dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n", 1376 dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
1181 hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID)); 1377 hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID));
@@ -1199,6 +1395,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1199ecodec: 1395ecodec:
1200 free_irq(irq, hdmi); 1396 free_irq(irq, hdmi);
1201ereqirq: 1397ereqirq:
1398 if (hdmi->htop1)
1399 iounmap(hdmi->htop1);
1400emap_htop1:
1202 pm_runtime_put(&pdev->dev); 1401 pm_runtime_put(&pdev->dev);
1203 pm_runtime_disable(&pdev->dev); 1402 pm_runtime_disable(&pdev->dev);
1204 iounmap(hdmi->base); 1403 iounmap(hdmi->base);
@@ -1230,6 +1429,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
1230 pm_runtime_disable(&pdev->dev); 1429 pm_runtime_disable(&pdev->dev);
1231 clk_disable(hdmi->hdmi_clk); 1430 clk_disable(hdmi->hdmi_clk);
1232 clk_put(hdmi->hdmi_clk); 1431 clk_put(hdmi->hdmi_clk);
1432 if (hdmi->htop1)
1433 iounmap(hdmi->htop1);
1233 iounmap(hdmi->base); 1434 iounmap(hdmi->base);
1234 release_mem_region(res->start, resource_size(res)); 1435 release_mem_region(res->start, resource_size(res));
1235 kfree(hdmi); 1436 kfree(hdmi);
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h
index aff73842d877..85d6738b6c64 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/sis/init.h
@@ -105,51 +105,6 @@ static const unsigned short ModeIndex_1920x1440[] = {0x68, 0x69, 0x00, 0x6b};
105static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00}; 105static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00};
106static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e}; 106static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e};
107 107
108static const unsigned short SiS_DRAMType[17][5]={
109 {0x0C,0x0A,0x02,0x40,0x39},
110 {0x0D,0x0A,0x01,0x40,0x48},
111 {0x0C,0x09,0x02,0x20,0x35},
112 {0x0D,0x09,0x01,0x20,0x44},
113 {0x0C,0x08,0x02,0x10,0x31},
114 {0x0D,0x08,0x01,0x10,0x40},
115 {0x0C,0x0A,0x01,0x20,0x34},
116 {0x0C,0x09,0x01,0x08,0x32},
117 {0x0B,0x08,0x02,0x08,0x21},
118 {0x0C,0x08,0x01,0x08,0x30},
119 {0x0A,0x08,0x02,0x04,0x11},
120 {0x0B,0x0A,0x01,0x10,0x28},
121 {0x09,0x08,0x02,0x02,0x01},
122 {0x0B,0x09,0x01,0x08,0x24},
123 {0x0B,0x08,0x01,0x04,0x20},
124 {0x0A,0x08,0x01,0x02,0x10},
125 {0x09,0x08,0x01,0x01,0x00}
126};
127
128static const unsigned short SiS_SDRDRAM_TYPE[13][5] =
129{
130 { 2,12, 9,64,0x35},
131 { 1,13, 9,64,0x44},
132 { 2,12, 8,32,0x31},
133 { 2,11, 9,32,0x25},
134 { 1,12, 9,32,0x34},
135 { 1,13, 8,32,0x40},
136 { 2,11, 8,16,0x21},
137 { 1,12, 8,16,0x30},
138 { 1,11, 9,16,0x24},
139 { 1,11, 8, 8,0x20},
140 { 2, 9, 8, 4,0x01},
141 { 1,10, 8, 4,0x10},
142 { 1, 9, 8, 2,0x00}
143};
144
145static const unsigned short SiS_DDRDRAM_TYPE[4][5] =
146{
147 { 2,12, 9,64,0x35},
148 { 2,12, 8,32,0x31},
149 { 2,11, 8,16,0x21},
150 { 2, 9, 8, 4,0x01}
151};
152
153static const unsigned char SiS_MDA_DAC[] = 108static const unsigned char SiS_MDA_DAC[] =
154{ 109{
155 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 110 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 078ca2167d6f..a7a48db64ce2 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -4222,6 +4222,26 @@ sisfb_post_300_buswidth(struct sis_video_info *ivideo)
4222 return 1; /* 32bit */ 4222 return 1; /* 32bit */
4223} 4223}
4224 4224
4225static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
4226 {0x0C,0x0A,0x02,0x40,0x39},
4227 {0x0D,0x0A,0x01,0x40,0x48},
4228 {0x0C,0x09,0x02,0x20,0x35},
4229 {0x0D,0x09,0x01,0x20,0x44},
4230 {0x0C,0x08,0x02,0x10,0x31},
4231 {0x0D,0x08,0x01,0x10,0x40},
4232 {0x0C,0x0A,0x01,0x20,0x34},
4233 {0x0C,0x09,0x01,0x08,0x32},
4234 {0x0B,0x08,0x02,0x08,0x21},
4235 {0x0C,0x08,0x01,0x08,0x30},
4236 {0x0A,0x08,0x02,0x04,0x11},
4237 {0x0B,0x0A,0x01,0x10,0x28},
4238 {0x09,0x08,0x02,0x02,0x01},
4239 {0x0B,0x09,0x01,0x08,0x24},
4240 {0x0B,0x08,0x01,0x04,0x20},
4241 {0x0A,0x08,0x01,0x02,0x10},
4242 {0x09,0x08,0x01,0x01,0x00}
4243};
4244
4225static int __devinit 4245static int __devinit
4226sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth, 4246sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
4227 int PseudoRankCapacity, int PseudoAdrPinCount, 4247 int PseudoRankCapacity, int PseudoAdrPinCount,
@@ -4231,27 +4251,8 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
4231 unsigned short sr14; 4251 unsigned short sr14;
4232 unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid; 4252 unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
4233 unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage; 4253 unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
4234 static const unsigned short SiS_DRAMType[17][5] = {
4235 {0x0C,0x0A,0x02,0x40,0x39},
4236 {0x0D,0x0A,0x01,0x40,0x48},
4237 {0x0C,0x09,0x02,0x20,0x35},
4238 {0x0D,0x09,0x01,0x20,0x44},
4239 {0x0C,0x08,0x02,0x10,0x31},
4240 {0x0D,0x08,0x01,0x10,0x40},
4241 {0x0C,0x0A,0x01,0x20,0x34},
4242 {0x0C,0x09,0x01,0x08,0x32},
4243 {0x0B,0x08,0x02,0x08,0x21},
4244 {0x0C,0x08,0x01,0x08,0x30},
4245 {0x0A,0x08,0x02,0x04,0x11},
4246 {0x0B,0x0A,0x01,0x10,0x28},
4247 {0x09,0x08,0x02,0x02,0x01},
4248 {0x0B,0x09,0x01,0x08,0x24},
4249 {0x0B,0x08,0x01,0x04,0x20},
4250 {0x0A,0x08,0x01,0x02,0x10},
4251 {0x09,0x08,0x01,0x01,0x00}
4252 };
4253 4254
4254 for(k = 0; k <= 16; k++) { 4255 for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
4255 4256
4256 RankCapacity = buswidth * SiS_DRAMType[k][3]; 4257 RankCapacity = buswidth * SiS_DRAMType[k][3];
4257 4258
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 30f7a815a62b..5b6abc6de84b 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -1036,6 +1036,6 @@ static void __exit xxxfb_exit(void)
1036 */ 1036 */
1037 1037
1038module_init(xxxfb_init); 1038module_init(xxxfb_init);
1039module_exit(xxxfb_remove); 1039module_exit(xxxfb_exit);
1040 1040
1041MODULE_LICENSE("GPL"); 1041MODULE_LICENSE("GPL");
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index ccbfef5e828f..af3ef27ad36c 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -846,7 +846,7 @@ static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
846 } 846 }
847} 847}
848 848
849int ufx_handle_damage(struct ufx_data *dev, int x, int y, 849static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
850 int width, int height) 850 int width, int height)
851{ 851{
852 size_t packed_line_len = ALIGN((width * 2), 4); 852 size_t packed_line_len = ALIGN((width * 2), 4);
@@ -1083,7 +1083,7 @@ static int ufx_ops_open(struct fb_info *info, int user)
1083 1083
1084 struct fb_deferred_io *fbdefio; 1084 struct fb_deferred_io *fbdefio;
1085 1085
1086 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); 1086 fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
1087 1087
1088 if (fbdefio) { 1088 if (fbdefio) {
1089 fbdefio->delay = UFX_DEFIO_WRITE_DELAY; 1089 fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 7af1e8166182..8af64148294b 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -893,7 +893,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
893 893
894 struct fb_deferred_io *fbdefio; 894 struct fb_deferred_io *fbdefio;
895 895
896 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); 896 fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
897 897
898 if (fbdefio) { 898 if (fbdefio) {
899 fbdefio->delay = DL_DEFIO_WRITE_DELAY; 899 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 0c8837565bc7..c80e770e1800 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1276,17 +1276,12 @@ static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
1276static ssize_t viafb_dfph_proc_write(struct file *file, 1276static ssize_t viafb_dfph_proc_write(struct file *file,
1277 const char __user *buffer, size_t count, loff_t *pos) 1277 const char __user *buffer, size_t count, loff_t *pos)
1278{ 1278{
1279 char buf[20]; 1279 int err;
1280 u8 reg_val = 0; 1280 u8 reg_val;
1281 unsigned long length; 1281 err = kstrtou8_from_user(buffer, count, 0, &reg_val);
1282 if (count < 1) 1282 if (err)
1283 return -EINVAL; 1283 return err;
1284 length = count > 20 ? 20 : count; 1284
1285 if (copy_from_user(&buf[0], buffer, length))
1286 return -EFAULT;
1287 buf[length - 1] = '\0'; /*Ensure end string */
1288 if (kstrtou8(buf, 0, &reg_val) < 0)
1289 return -EINVAL;
1290 viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f); 1285 viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
1291 return count; 1286 return count;
1292} 1287}
@@ -1316,17 +1311,12 @@ static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
1316static ssize_t viafb_dfpl_proc_write(struct file *file, 1311static ssize_t viafb_dfpl_proc_write(struct file *file,
1317 const char __user *buffer, size_t count, loff_t *pos) 1312 const char __user *buffer, size_t count, loff_t *pos)
1318{ 1313{
1319 char buf[20]; 1314 int err;
1320 u8 reg_val = 0; 1315 u8 reg_val;
1321 unsigned long length; 1316 err = kstrtou8_from_user(buffer, count, 0, &reg_val);
1322 if (count < 1) 1317 if (err)
1323 return -EINVAL; 1318 return err;
1324 length = count > 20 ? 20 : count; 1319
1325 if (copy_from_user(&buf[0], buffer, length))
1326 return -EFAULT;
1327 buf[length - 1] = '\0'; /*Ensure end string */
1328 if (kstrtou8(buf, 0, &reg_val) < 0)
1329 return -EINVAL;
1330 viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f); 1320 viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
1331 return count; 1321 return count;
1332} 1322}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index afcd13676542..e4841c36798b 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
4 * Watchdog driver for ARM SP805 watchdog module 4 * Watchdog driver for ARM SP805 watchdog module
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2 or later. This program is licensed "as is" without any 10 * License version 2 or later. This program is licensed "as is" without any
@@ -331,6 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
331 331
332module_amba_driver(sp805_wdt_driver); 332module_amba_driver(sp805_wdt_driver);
333 333
334MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 334MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
335MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); 335MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
336MODULE_LICENSE("GPL"); 336MODULE_LICENSE("GPL");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6908e4ce2a0d..7595581d032c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn)
827 handle_edge_irq, "event"); 827 handle_edge_irq, "event");
828 828
829 xen_irq_info_evtchn_init(irq, evtchn); 829 xen_irq_info_evtchn_init(irq, evtchn);
830 } else {
831 struct irq_info *info = info_for_irq(irq);
832 WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
830 } 833 }
831 834
832out: 835out:
@@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
862 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); 865 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
863 866
864 bind_evtchn_to_cpu(evtchn, cpu); 867 bind_evtchn_to_cpu(evtchn, cpu);
868 } else {
869 struct irq_info *info = info_for_irq(irq);
870 WARN_ON(info == NULL || info->type != IRQT_IPI);
865 } 871 }
866 872
867 out: 873 out:
@@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
939 xen_irq_info_virq_init(cpu, irq, evtchn, virq); 945 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
940 946
941 bind_evtchn_to_cpu(evtchn, cpu); 947 bind_evtchn_to_cpu(evtchn, cpu);
948 } else {
949 struct irq_info *info = info_for_irq(irq);
950 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
942 } 951 }
943 952
944out: 953out:
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index b84bf0b6cc34..18fff88254eb 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev)
59 59
60#ifdef CONFIG_ACPI 60#ifdef CONFIG_ACPI
61 handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); 61 handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
62 if (!handle) 62 if (!handle && pci_dev->bus->bridge)
63 handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); 63 handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
64#ifdef CONFIG_PCI_IOV 64#ifdef CONFIG_PCI_IOV
65 if (!handle && pci_dev->is_virtfn) 65 if (!handle && pci_dev->is_virtfn)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index dcb79521e6c8..89f264c67420 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
269} 269}
270 270
271/* returns 0 if the page was successfully put into frontswap, -1 if not */ 271/* returns 0 if the page was successfully put into frontswap, -1 if not */
272static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, 272static int tmem_frontswap_store(unsigned type, pgoff_t offset,
273 struct page *page) 273 struct page *page)
274{ 274{
275 u64 ind64 = (u64)offset; 275 u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
295 * returns 0 if the page was successfully gotten from frontswap, -1 if 295 * returns 0 if the page was successfully gotten from frontswap, -1 if
296 * was not present (should never happen!) 296 * was not present (should never happen!)
297 */ 297 */
298static int tmem_frontswap_get_page(unsigned type, pgoff_t offset, 298static int tmem_frontswap_load(unsigned type, pgoff_t offset,
299 struct page *page) 299 struct page *page)
300{ 300{
301 u64 ind64 = (u64)offset; 301 u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s)
362__setup("nofrontswap", no_frontswap); 362__setup("nofrontswap", no_frontswap);
363 363
364static struct frontswap_ops __initdata tmem_frontswap_ops = { 364static struct frontswap_ops __initdata tmem_frontswap_ops = {
365 .put_page = tmem_frontswap_put_page, 365 .store = tmem_frontswap_store,
366 .get_page = tmem_frontswap_get_page, 366 .load = tmem_frontswap_load,
367 .invalidate_page = tmem_frontswap_flush_page, 367 .invalidate_page = tmem_frontswap_flush_page,
368 .invalidate_area = tmem_frontswap_flush_area, 368 .invalidate_area = tmem_frontswap_flush_area,
369 .init = tmem_frontswap_init 369 .init = tmem_frontswap_init